From ee45ca581fe56f75760649fe552be8ef1315c37c Mon Sep 17 00:00:00 2001
From: ioanna
Date: Mon, 2 Aug 2021 11:20:26 +0300
Subject: [PATCH 1/5] Migrating to v5 modules

---
 Gopkg.lock | 381 -
 Gopkg.toml | 3 -
 go.mod | 9 +
 go.sum | 1133 +
 server/command.go | 4 +-
 server/command_test.go | 6 +-
 server/configuration.go | 2 +-
 server/configuration_test.go | 6 +-
 server/http.go | 4 +-
 server/http_test.go | 6 +-
 server/main.go | 2 +-
 server/plugin.go | 4 +-
 server/plugin_test.go | 6 +-
 vendor/github.com/blang/semver/.travis.yml | 14 +-
 vendor/github.com/blang/semver/README.md | 6 +-
 vendor/github.com/blang/semver/go.mod | 1 -
 vendor/github.com/blang/semver/range.go | 2 +-
 vendor/github.com/blang/semver/semver.go | 45 +-
 vendor/github.com/blang/semver/sql.go | 2 +-
 .../disintegration/imaging/.travis.yml | 12 +
 .../github.com/disintegration/imaging/LICENSE | 21 +
 .../disintegration/imaging/README.md | 226 +
 .../disintegration/imaging/adjust.go | 253 +
 .../disintegration/imaging/convolution.go | 148 +
 .../github.com/disintegration/imaging/doc.go | 7 +
 .../disintegration/imaging/effects.go | 169 +
 .../github.com/disintegration/imaging/go.mod | 3 +
 .../github.com/disintegration/imaging/go.sum | 3 +
 .../disintegration/imaging/histogram.go | 52 +
 .../github.com/disintegration/imaging/io.go | 444 +
 .../disintegration/imaging/resize.go | 595 +
 .../disintegration/imaging/scanner.go | 285 +
 .../disintegration/imaging/tools.go | 249 +
 .../disintegration/imaging/transform.go | 268 +
 .../disintegration/imaging/utils.go | 167 +
 vendor/github.com/fatih/color/LICENSE.md | 20 +
 vendor/github.com/fatih/color/README.md | 173 +
 vendor/github.com/fatih/color/color.go | 603 +
 vendor/github.com/fatih/color/doc.go | 133 +
 vendor/github.com/fatih/color/go.mod | 8 +
 vendor/github.com/fatih/color/go.sum | 7 +
 .../github.com/francoispqt/gojay/.gitignore | 5 +
 .../github.com/francoispqt/gojay/.travis.yml | 15 +
 .../github.com/francoispqt/gojay/Gopkg.lock | 163 +
 .../github.com/francoispqt/gojay/Gopkg.toml | 23 +
 vendor/github.com/francoispqt/gojay/LICENSE | 21 +
 vendor/github.com/francoispqt/gojay/Makefile | 11 +
 vendor/github.com/francoispqt/gojay/README.md | 855 +
 vendor/github.com/francoispqt/gojay/decode.go | 386 +
 .../francoispqt/gojay/decode_array.go | 247 +
 .../francoispqt/gojay/decode_bool.go | 241 +
 .../francoispqt/gojay/decode_embedded_json.go | 85 +
 .../francoispqt/gojay/decode_interface.go | 130 +
 .../francoispqt/gojay/decode_number.go | 118 +
 .../francoispqt/gojay/decode_number_float.go | 516 +
 .../francoispqt/gojay/decode_number_int.go | 1338 +
 .../francoispqt/gojay/decode_number_uint.go | 715 +
 .../francoispqt/gojay/decode_object.go | 407 +
 .../francoispqt/gojay/decode_pool.go | 64 +
 .../francoispqt/gojay/decode_slice.go | 89 +
 .../francoispqt/gojay/decode_sqlnull.go | 157 +
 .../francoispqt/gojay/decode_stream.go | 115 +
 .../francoispqt/gojay/decode_stream_pool.go | 59 +
 .../francoispqt/gojay/decode_string.go | 260 +
 .../gojay/decode_string_unicode.go | 98 +
 .../francoispqt/gojay/decode_time.go | 53 +
 .../francoispqt/gojay/decode_unsafe.go | 120 +
 vendor/github.com/francoispqt/gojay/encode.go | 202 +
 .../francoispqt/gojay/encode_array.go | 212 +
 .../francoispqt/gojay/encode_bool.go | 164 +
 .../francoispqt/gojay/encode_builder.go | 65 +
 .../francoispqt/gojay/encode_embedded_json.go | 93 +
 .../francoispqt/gojay/encode_interface.go | 173 +
 .../francoispqt/gojay/encode_null.go | 39 +
 .../francoispqt/gojay/encode_number.go | 1 +
 .../francoispqt/gojay/encode_number_float.go | 368 +
.../francoispqt/gojay/encode_number_int.go | 500 + .../francoispqt/gojay/encode_number_uint.go | 362 + .../francoispqt/gojay/encode_object.go | 400 + .../francoispqt/gojay/encode_pool.go | 50 + .../francoispqt/gojay/encode_slice.go | 113 + .../francoispqt/gojay/encode_sqlnull.go | 377 + .../francoispqt/gojay/encode_stream.go | 205 + .../francoispqt/gojay/encode_stream_pool.go | 38 + .../francoispqt/gojay/encode_string.go | 186 + .../francoispqt/gojay/encode_time.go | 68 + vendor/github.com/francoispqt/gojay/errors.go | 88 + vendor/github.com/francoispqt/gojay/go.mod | 24 + vendor/github.com/francoispqt/gojay/go.sum | 182 + vendor/github.com/francoispqt/gojay/gojay.go | 10 + vendor/github.com/francoispqt/gojay/gojay.png | Bin 0 -> 44163 bytes .../fsnotify/fsnotify/.editorconfig | 9 +- .../fsnotify/fsnotify/.gitattributes | 1 + .../github.com/fsnotify/fsnotify/.travis.yml | 20 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 2 +- vendor/github.com/fsnotify/fsnotify/README.md | 71 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 4 +- vendor/github.com/fsnotify/fsnotify/go.mod | 5 + vendor/github.com/fsnotify/fsnotify/go.sum | 2 + .../fsnotify/fsnotify/inotify_poller.go | 4 +- .../fsnotify/fsnotify/open_mode_bsd.go | 2 +- .../fsnotify/fsnotify/open_mode_darwin.go | 2 +- .../go-asn1-ber/asn1-ber/.travis.yml | 43 + .../go-asn1-ber/asn1-ber}/LICENSE | 0 .../go-asn1-ber/asn1-ber}/README.md | 0 .../go-asn1-ber/asn1-ber}/ber.go | 208 +- .../go-asn1-ber/asn1-ber}/content_int.go | 2 +- .../go-asn1-ber/asn1-ber/generalizedTime.go | 105 + vendor/github.com/go-asn1-ber/asn1-ber/go.mod | 3 + .../go-asn1-ber/asn1-ber}/header.go | 19 +- .../go-asn1-ber/asn1-ber}/identifier.go | 0 .../go-asn1-ber/asn1-ber}/length.go | 12 +- .../github.com/go-asn1-ber/asn1-ber/real.go | 157 + .../go-asn1-ber/asn1-ber}/util.go | 2 +- .../golang/protobuf/proto/buffer.go | 324 + .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 126 +- .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 771 +- .../github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 323 + .../golang/protobuf/proto/table_marshal.go | 2776 - .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 - .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../golang/protobuf/proto/text_parser.go | 880 - .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../protoc-gen-go/descriptor/descriptor.pb.go | 200 + .../github.com/golang/protobuf/ptypes/any.go | 214 +- .../golang/protobuf/ptypes/any/any.pb.go | 230 +- .../golang/protobuf/ptypes/any/any.proto | 154 - .../github.com/golang/protobuf/ptypes/doc.go | 37 +- .../golang/protobuf/ptypes/duration.go | 116 +- .../protobuf/ptypes/duration/duration.pb.go | 192 +- .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/empty/empty.pb.go | 62 + 
.../golang/protobuf/ptypes/timestamp.go | 109 +- .../protobuf/ptypes/timestamp/timestamp.pb.go | 211 +- .../protobuf/ptypes/timestamp/timestamp.proto | 135 - vendor/github.com/google/uuid/README.md | 2 +- vendor/github.com/google/uuid/marshal.go | 7 +- vendor/github.com/google/uuid/version1.go | 12 +- vendor/github.com/google/uuid/version4.go | 7 +- vendor/github.com/gorilla/websocket/README.md | 2 +- vendor/github.com/gorilla/websocket/conn.go | 14 +- vendor/github.com/gorilla/websocket/doc.go | 6 +- vendor/github.com/gorilla/websocket/go.sum | 2 - .../github.com/gorilla/websocket/prepared.go | 4 +- vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 178 + vendor/github.com/hashicorp/errwrap/go.mod | 1 + .../github.com/hashicorp/go-hclog/README.md | 2 +- .../hashicorp/go-hclog/colorize_unix.go | 27 + .../hashicorp/go-hclog/colorize_windows.go | 33 + .../github.com/hashicorp/go-hclog/exclude.go | 71 + .../github.com/hashicorp/go-hclog/global.go | 14 + vendor/github.com/hashicorp/go-hclog/go.mod | 5 + vendor/github.com/hashicorp/go-hclog/go.sum | 12 + .../hashicorp/go-hclog/interceptlogger.go | 235 + .../hashicorp/go-hclog/intlogger.go | 244 +- .../github.com/hashicorp/go-hclog/logger.go | 173 +- .../hashicorp/go-hclog/nulllogger.go | 6 + .../github.com/hashicorp/go-hclog/stdlog.go | 68 +- .../github.com/hashicorp/go-hclog/writer.go | 20 +- .../hashicorp/go-multierror/.travis.yml | 12 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 131 + .../hashicorp/go-multierror/append.go | 41 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../github.com/hashicorp/go-multierror/go.mod | 5 + .../github.com/hashicorp/go-multierror/go.sum | 2 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 118 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + .../github.com/hashicorp/go-plugin/README.md | 5 - .../github.com/hashicorp/go-plugin/client.go | 34 +- vendor/github.com/hashicorp/go-plugin/go.mod | 16 +- vendor/github.com/hashicorp/go-plugin/go.sum | 72 +- .../hashicorp/go-plugin/grpc_client.go | 8 +- .../hashicorp/go-plugin/grpc_server.go | 19 +- .../hashicorp/go-plugin/grpc_stdio.go | 207 + .../go-plugin/internal/plugin/gen.go | 2 +- .../internal/plugin/grpc_broker.pb.go | 48 +- .../internal/plugin/grpc_broker.proto | 2 - .../internal/plugin/grpc_controller.pb.go | 42 +- .../internal/plugin/grpc_stdio.pb.go | 233 + .../internal/plugin/grpc_stdio.proto | 30 + .../github.com/hashicorp/go-plugin/server.go | 258 +- .../github.com/hashicorp/go-plugin/testing.go | 2 +- vendor/github.com/hashicorp/yamux/addr.go | 2 +- vendor/github.com/hashicorp/yamux/stream.go | 6 +- vendor/github.com/mattermost/ldap/.travis.yml | 7 +- vendor/github.com/mattermost/ldap/README.md | 37 +- vendor/github.com/mattermost/ldap/add.go | 53 +- vendor/github.com/mattermost/ldap/bind.go | 83 +- vendor/github.com/mattermost/ldap/client.go | 18 +- vendor/github.com/mattermost/ldap/compare.go | 55 +- vendor/github.com/mattermost/ldap/conn.go | 22 +- vendor/github.com/mattermost/ldap/control.go | 2 +- vendor/github.com/mattermost/ldap/debug.go | 19 +- vendor/github.com/mattermost/ldap/del.go | 48 +- vendor/github.com/mattermost/ldap/dn.go | 2 +- vendor/github.com/mattermost/ldap/error.go | 6 +- vendor/github.com/mattermost/ldap/filter.go | 2 +- 
vendor/github.com/mattermost/ldap/go.mod | 5 + vendor/github.com/mattermost/ldap/go.sum | 4 + vendor/github.com/mattermost/ldap/ldap.go | 5 +- vendor/github.com/mattermost/ldap/moddn.go | 47 +- vendor/github.com/mattermost/ldap/modify.go | 70 +- .../mattermost/ldap/passwdmodify.go | 64 +- vendor/github.com/mattermost/ldap/request.go | 66 + vendor/github.com/mattermost/ldap/search.go | 75 +- vendor/github.com/mattermost/logr/.gitignore | 36 + vendor/github.com/mattermost/logr/.travis.yml | 4 + vendor/github.com/mattermost/logr/LICENSE | 21 + vendor/github.com/mattermost/logr/README.md | 193 + vendor/github.com/mattermost/logr/config.go | 11 + vendor/github.com/mattermost/logr/const.go | 34 + vendor/github.com/mattermost/logr/filter.go | 26 + .../github.com/mattermost/logr/format/json.go | 273 + .../mattermost/logr/format/plain.go | 75 + .../github.com/mattermost/logr/formatter.go | 119 + vendor/github.com/mattermost/logr/go.mod | 11 + vendor/github.com/mattermost/logr/go.sum | 174 + .../github.com/mattermost/logr/levelcache.go | 98 + .../github.com/mattermost/logr/levelcustom.go | 45 + vendor/github.com/mattermost/logr/levelstd.go | 37 + vendor/github.com/mattermost/logr/logger.go | 218 + vendor/github.com/mattermost/logr/logr.go | 664 + vendor/github.com/mattermost/logr/logrec.go | 189 + vendor/github.com/mattermost/logr/metrics.go | 117 + vendor/github.com/mattermost/logr/target.go | 299 + .../github.com/mattermost/logr/target/file.go | 87 + .../mattermost/logr/target/syslog.go | 89 + .../mattermost/logr/target/writer.go | 40 + vendor/github.com/mattermost/logr/timeout.go | 34 + .../mattermost-server/mlog/default.go | 50 - .../mattermost-server/mlog/global.go | 44 - .../mattermost/mattermost-server/mlog/log.go | 178 - .../mattermost-server/model/channel_search.go | 30 - .../mattermost-server/model/command_args.go | 35 - .../mattermost-server/model/file_info.go | 188 - .../mattermost-server/model/group.go | 155 - .../mattermost-server/model/ldap.go | 8 - .../mattermost-server/model/mfa_secret.go | 25 - .../mattermost-server/model/migration.go | 8 - .../model/push_notification.go | 95 - .../mattermost-server/model/role.go | 419 - .../mattermost-server/model/saml.go | 37 - .../mattermost-server/model/search_params.go | 230 - .../mattermost-server/model/session.go | 167 - .../mattermost-server/model/system.go | 52 - .../mattermost-server/model/team_member.go | 103 - .../mattermost-server/model/team_search.go | 35 - .../mattermost-server/model/user_count.go | 18 - .../model/websocket_client.go | 195 - .../model/websocket_message.go | 166 - .../mattermost-server/plugin/health_check.go | 146 - .../mattermost-server/plugin/helpers.go | 36 - .../mattermost-server/plugin/helpers_bots.go | 74 - .../mattermost-server/plugin/helpers_kv.go | 85 - .../plugin/plugintest/helpers.go | 104 - .../scripts/license-check.sh | 28 - .../mattermost/mattermost-server/utils/api.go | 76 - .../mattermost-server/utils/extract.go | 76 - .../mattermost/mattermost-server/utils/lru.go | 204 - .../mattermost-server/utils/path.go | 15 - .../utils/test_files_compiler.go | 60 - .../mattermost-server/utils/utils.go | 100 - .../mattermost-server/{ => v5}/LICENSE.txt | 2 +- .../mattermost-server/{ => v5}/NOTICE.txt | 716 +- .../v5/einterfaces/account_migration.go | 13 + .../mattermost-server/v5/einterfaces/cloud.go | 23 + .../v5/einterfaces/cluster.go | 30 + .../v5/einterfaces/compliance.go | 13 + .../v5/einterfaces/data_retention.go | 12 + .../mattermost-server/v5/einterfaces/ldap.go | 26 + .../v5/einterfaces/message_export.go | 
15 + .../v5/einterfaces/metrics.go | 69 + .../mattermost-server/v5/einterfaces/mfa.go | 15 + .../v5/einterfaces/notification.go | 13 + .../v5/einterfaces/oauthproviders.go | 30 + .../mattermost-server/v5/einterfaces/saml.go | 15 + .../mattermost-server/v5/mlog/default.go | 95 + .../mattermost-server/v5/mlog/errors.go | 32 + .../mattermost-server/v5/mlog/global.go | 95 + .../mattermost-server/v5/mlog/levels.go | 39 + .../mattermost-server/v5/mlog/log.go | 342 + .../mattermost-server/v5/mlog/logr.go | 247 + .../mattermost-server/{ => v5}/mlog/stdlog.go | 4 +- .../mattermost-server/{ => v5}/mlog/sugar.go | 4 +- .../mattermost-server/v5/mlog/syslog.go | 142 + .../mattermost-server/v5/mlog/tcp.go | 273 + .../v5/mlog/test-tls-client-cert.pem | 43 + .../{ => v5}/mlog/testing.go | 13 +- .../{ => v5}/model/access.go | 9 +- .../{ => v5}/model/analytics_row.go | 26 +- .../mattermost-server/v5/model/at_mentions.go | 47 + .../mattermost-server/{ => v5}/model/audit.go | 2 +- .../mattermost-server/v5/model/auditconv.go | 669 + .../{ => v5}/model/audits.go | 11 +- .../{ => v5}/model/authorize.go | 8 +- .../mattermost-server/{ => v5}/model/bot.go | 42 +- .../{ => v5}/model/bot_default_image.go | 4 +- .../{ => v5}/model/builtin.go | 2 +- .../{ => v5}/model/bundle_info.go | 6 +- .../{ => v5}/model/channel.go | 114 +- .../{ => v5}/model/channel_count.go | 2 +- .../{ => v5}/model/channel_data.go | 2 +- .../{ => v5}/model/channel_list.go | 14 +- .../{ => v5}/model/channel_member.go | 45 +- .../{ => v5}/model/channel_member_history.go | 4 +- .../model/channel_member_history_result.go | 11 +- .../{ => v5}/model/channel_mentions.go | 2 +- .../v5/model/channel_search.go | 39 + .../v5/model/channel_sidebar.go | 125 + .../{ => v5}/model/channel_stats.go | 8 +- .../{ => v5}/model/channel_view.go | 4 +- .../{ => v5}/model/client4.go | 1407 +- .../mattermost-server/v5/model/cloud.go | 130 + .../{ => v5}/model/cluster_discovery.go | 6 +- .../{ => v5}/model/cluster_info.go | 15 +- .../{ => v5}/model/cluster_message.go | 29 +- .../{ => v5}/model/cluster_stats.go | 12 +- .../{ => v5}/model/command.go | 32 +- .../v5/model/command_args.go | 59 + .../v5/model/command_autocomplete.go | 455 + .../v5/model/command_request.go | 31 + .../{ => v5}/model/command_response.go | 29 +- .../{ => v5}/model/command_webhook.go | 16 +- .../{ => v5}/model/compliance.go | 69 +- .../{ => v5}/model/compliance_post.go | 56 +- .../{ => v5}/model/config.go | 1937 +- .../{ => v5}/model/data_retention_policy.go | 14 +- .../mattermost-server/{ => v5}/model/emoji.go | 6 +- .../{ => v5}/model/emoji_data.go | 4 +- .../{ => v5}/model/emoji_search.go | 4 +- .../v5/model/feature_flags.go | 51 + .../mattermost-server/{ => v5}/model/file.go | 4 +- .../mattermost-server/v5/model/file_info.go | 228 + .../v5/model/file_info_list.go | 128 + .../v5/model/file_info_search_results.go | 37 + .../{ => v5}/model/gitlab.go | 2 +- .../mattermost-server/v5/model/group.go | 221 + .../{ => v5}/model/group_member.go | 4 +- .../{ => v5}/model/group_syncable.go | 21 +- .../v5/model/guest_invite.go | 53 + .../{ => v5}/model/incoming_webhook.go | 16 +- .../{ => v5}/model/initial_load.go | 14 +- .../{ => v5}/model/integration_action.go | 84 +- .../mattermost-server/v5/model/integrity.go | 58 + .../mattermost-server/{ => v5}/model/job.go | 33 +- .../mattermost-server/v5/model/ldap.go | 10 + .../{ => v5}/model/license.go | 98 +- .../{ => v5}/model/link_metadata.go | 10 +- .../{ => v5}/model/manifest.go | 267 +- .../v5/model/marketplace_plugin.go | 136 + 
.../mattermost-server/v5/model/mention_map.go | 80 + .../{ => v5}/model/message_export.go | 5 +- .../mattermost-server/v5/model/mfa_secret.go | 25 + .../mattermost-server/v5/model/migration.go | 29 + .../mattermost-server/{ => v5}/model/oauth.go | 6 +- .../{ => v5}/model/outgoing_webhook.go | 13 +- .../{ => v5}/model/permission.go | 702 +- .../{ => v5}/model/plugin_event_data.go | 0 .../{ => v5}/model/plugin_key_value.go | 4 +- .../v5/model/plugin_kvset_options.go | 47 + .../{ => v5}/model/plugin_status.go | 0 .../valid.go => v5/model/plugin_valid.go} | 8 +- .../{ => v5}/model/plugins_response.go | 2 +- .../mattermost-server/{ => v5}/model/post.go | 243 +- .../{ => v5}/model/post_embed.go | 3 +- .../{ => v5}/model/post_list.go | 9 +- .../{ => v5}/model/post_metadata.go | 2 +- .../{ => v5}/model/post_search_results.go | 5 +- .../{ => v5}/model/preference.go | 33 +- .../{ => v5}/model/preferences.go | 7 +- .../v5/model/product_notices.go | 214 + .../v5/model/push_notification.go | 118 + .../{ => v5}/model/push_response.go | 9 +- .../{ => v5}/model/reaction.go | 17 +- .../mattermost-server/v5/model/role.go | 784 + .../mattermost-server/v5/model/saml.go | 200 + .../{ => v5}/model/scheduled_task.go | 4 +- .../{ => v5}/model/scheme.go | 17 +- .../v5/model/search_params.go | 397 + .../{ => v5}/model/security_bulletin.go | 14 +- .../v5/model/serialized_gen.go | 1779 + .../mattermost-server/v5/model/session.go | 231 + .../{ => v5}/model/slack_attachment.go | 10 +- .../{ => v5}/model/slack_compatibility.go | 4 +- .../{ => v5}/model/status.go | 30 +- .../{ => v5}/model/suggest_command.go | 2 +- .../{ => v5}/model/switch_request.go | 10 +- .../mattermost-server/v5/model/system.go | 209 + .../mattermost-server/{ => v5}/model/team.go | 33 +- .../mattermost-server/v5/model/team_member.go | 186 + .../mattermost-server/v5/model/team_search.go | 44 + .../{ => v5}/model/team_stats.go | 4 +- .../{ => v5}/model/terms_of_service.go | 6 +- .../mattermost-server/v5/model/thread.go | 85 + .../mattermost-server/{ => v5}/model/token.go | 8 +- .../v5/model/typing_request.go | 25 + .../v5/model/upload_session.go | 144 + .../mattermost-server/{ => v5}/model/user.go | 120 +- .../{ => v5}/model/user_access_token.go | 8 +- .../model/user_access_token_search.go | 4 +- .../{ => v5}/model/user_autocomplete.go | 9 +- .../mattermost-server/v5/model/user_count.go | 26 + .../{ => v5}/model/user_get.go | 14 +- .../{ => v5}/model/user_search.go | 36 +- .../{ => v5}/model/user_terms_of_service.go | 8 +- .../{ => v5}/model/users_stats.go | 4 +- .../mattermost-server/{ => v5}/model/utils.go | 163 +- .../{ => v5}/model/version.go | 25 +- .../v5/model/websocket_client.go | 321 + .../v5/model/websocket_message.go | 286 + .../{ => v5}/model/websocket_request.go | 11 +- .../mattermost-server/{ => v5}/plugin/api.go | 475 +- .../v5/plugin/api_timer_layer_generated.go | 1101 + .../{ => v5}/plugin/client.go | 4 +- .../{ => v5}/plugin/client_rpc.go | 286 +- .../{ => v5}/plugin/client_rpc_generated.go | 994 +- .../{ => v5}/plugin/context.go | 1 + .../mattermost-server/{ => v5}/plugin/doc.go | 0 .../{ => v5}/plugin/environment.go | 229 +- .../{ => v5}/plugin/hclog_adapter.go | 29 +- .../v5/plugin/health_check.go | 124 + .../mattermost-server/v5/plugin/helpers.go | 97 + .../v5/plugin/helpers_bots.go | 299 + .../v5/plugin/helpers_config.go | 32 + .../mattermost-server/v5/plugin/helpers_kv.go | 222 + .../v5/plugin/helpers_plugin.go | 81 + .../mattermost-server/v5/plugin/hijack.go | 205 + .../{ => v5}/plugin/hooks.go | 96 +- 
.../v5/plugin/hooks_timer_layer_generated.go | 164 + .../mattermost-server/{ => v5}/plugin/http.go | 26 +- .../{ => v5}/plugin/io_rpc.go | 0 .../{ => v5}/plugin/plugintest/api.go | 573 +- .../{ => v5}/plugin/plugintest/doc.go | 6 +- .../v5/plugin/plugintest/helpers.go | 257 + .../{ => v5}/plugin/plugintest/hooks.go | 25 +- .../{ => v5}/plugin/plugintest/mock/mock.go | 4 +- .../v5/plugin/stringifier.go | 31 + .../{ => v5}/plugin/supervisor.go | 33 +- .../{ => v5}/services/timezones/default.go | 4 +- .../{ => v5}/services/timezones/timezones.go | 2 +- .../mattermost-server/v5/utils/api.go | 141 + .../mattermost-server/v5/utils/archive.go | 65 + .../{ => v5}/utils/authorization.go | 6 +- .../{ => v5}/utils/backoff.go | 4 +- .../mattermost-server/{ => v5}/utils/emoji.go | 26 +- .../mattermost-server/{ => v5}/utils/file.go | 4 +- .../{ => v5}/utils/fileutils/fileutils.go | 31 +- .../mattermost-server/{ => v5}/utils/hash.go | 4 +- .../mattermost-server/{ => v5}/utils/html.go | 32 +- .../mattermost-server/{ => v5}/utils/i18n.go | 26 +- .../{ => v5}/utils/jsonutils/json.go | 2 +- .../{ => v5}/utils/license.go | 45 +- .../{ => v5}/utils/logger.go | 33 +- .../{ => v5}/utils/markdown/autolink.go | 16 +- .../{ => v5}/utils/markdown/block_quote.go | 4 +- .../{ => v5}/utils/markdown/blocks.go | 17 +- .../{ => v5}/utils/markdown/document.go | 4 +- .../{ => v5}/utils/markdown/fenced_code.go | 4 +- .../{ => v5}/utils/markdown/html.go | 4 +- .../{ => v5}/utils/markdown/html_entities.go | 4 +- .../{ => v5}/utils/markdown/indented_code.go | 4 +- .../{ => v5}/utils/markdown/inlines.go | 11 +- .../{ => v5}/utils/markdown/inspect.go | 4 +- .../{ => v5}/utils/markdown/lines.go | 13 +- .../{ => v5}/utils/markdown/links.go | 10 +- .../{ => v5}/utils/markdown/list.go | 4 +- .../{ => v5}/utils/markdown/markdown.go | 7 +- .../{ => v5}/utils/markdown/paragraph.go | 4 +- .../utils/markdown/reference_definition.go | 4 +- .../mattermost-server/{ => v5}/utils/merge.go | 4 +- .../{ => v5}/utils/password.go | 6 +- .../utils/policies-roles-mapping.json | 2 +- .../{ => v5}/utils/random.go | 2 +- .../{ => v5}/utils/subpath.go | 44 +- .../v5/utils/test_files_compiler.go | 45 + .../{ => v5}/utils/textgeneration.go | 26 +- .../mattermost-server/{ => v5}/utils/time.go | 2 +- .../{ => v5}/utils/urlencode.go | 2 +- .../mattermost-server/v5/utils/utils.go | 217 + .../github.com/mattn/go-colorable/.travis.yml | 15 + vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 37 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1043 + vendor/github.com/mattn/go-colorable/go.mod | 8 + vendor/github.com/mattn/go-colorable/go.sum | 5 + .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 55 + vendor/github.com/mattn/go-isatty/.travis.yml | 14 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.mod | 5 + vendor/github.com/mattn/go-isatty/go.sum | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../github.com/mattn/go-isatty/isatty_bsd.go | 18 + .../mattn/go-isatty/isatty_others.go | 15 + .../mattn/go-isatty/isatty_plan9.go | 22 + .../mattn/go-isatty/isatty_solaris.go | 22 + .../mattn/go-isatty/isatty_tcgets.go | 18 + .../mattn/go-isatty/isatty_windows.go | 125 + .../github.com/mattn/go-isatty/renovate.json | 8 + 
.../go-testing-interface/.travis.yml | 1 - .../mitchellh/go-testing-interface/README.md | 8 + .../mitchellh/go-testing-interface/go.mod | 2 + .../mitchellh/go-testing-interface/testing.go | 70 +- .../go-testing-interface/testing_go19.go | 108 - vendor/github.com/oklog/run/.travis.yml | 12 - vendor/github.com/oklog/run/README.md | 10 +- vendor/github.com/oklog/run/actors.go | 38 + vendor/github.com/oklog/run/go.mod | 3 + vendor/github.com/pborman/uuid/go.mod | 3 + vendor/github.com/pborman/uuid/go.sum | 2 + vendor/github.com/pborman/uuid/time.go | 2 +- vendor/github.com/pborman/uuid/uuid.go | 5 +- vendor/github.com/pborman/uuid/version4.go | 2 +- .../github.com/pelletier/go-toml/.travis.yml | 22 - .../github.com/pelletier/go-toml/Dockerfile | 1 + vendor/github.com/pelletier/go-toml/Makefile | 29 + vendor/github.com/pelletier/go-toml/README.md | 18 +- .../github.com/pelletier/go-toml/appveyor.yml | 34 - .../pelletier/go-toml/azure-pipelines.yml | 230 + .../pelletier/go-toml/benchmark.json | 164 - .../github.com/pelletier/go-toml/benchmark.sh | 9 +- .../pelletier/go-toml/benchmark.toml | 244 - .../pelletier/go-toml/benchmark.yml | 121 - vendor/github.com/pelletier/go-toml/doc.go | 2 +- .../pelletier/go-toml/example-crlf.toml | 1 + .../github.com/pelletier/go-toml/example.toml | 1 + vendor/github.com/pelletier/go-toml/fuzz.sh | 0 vendor/github.com/pelletier/go-toml/fuzzit.sh | 26 + vendor/github.com/pelletier/go-toml/go.mod | 6 +- vendor/github.com/pelletier/go-toml/go.sum | 12 + .../pelletier/go-toml/keysparsing.go | 3 +- vendor/github.com/pelletier/go-toml/lexer.go | 161 +- .../github.com/pelletier/go-toml/localtime.go | 281 + .../github.com/pelletier/go-toml/marshal.go | 684 +- .../marshal_OrderPreserve_Map_test.toml | 17 - .../go-toml/marshal_OrderPreserve_test.toml | 1 + .../pelletier/go-toml/marshal_test.toml | 1 + vendor/github.com/pelletier/go-toml/parser.go | 71 +- vendor/github.com/pelletier/go-toml/token.go | 22 +- vendor/github.com/pelletier/go-toml/toml.go | 138 +- .../pelletier/go-toml/tomltree_create.go | 13 + .../pelletier/go-toml/tomltree_write.go | 151 +- vendor/github.com/philhofer/fwd/LICENSE.md | 7 + vendor/github.com/philhofer/fwd/README.md | 315 + vendor/github.com/philhofer/fwd/reader.go | 383 + vendor/github.com/philhofer/fwd/writer.go | 236 + .../philhofer/fwd/writer_appengine.go | 5 + .../github.com/philhofer/fwd/writer_unsafe.go | 18 + vendor/github.com/pkg/errors/.travis.yml | 11 +- vendor/github.com/pkg/errors/Makefile | 44 + vendor/github.com/pkg/errors/README.md | 11 +- vendor/github.com/pkg/errors/errors.go | 8 +- vendor/github.com/pkg/errors/go113.go | 38 + vendor/github.com/pkg/errors/stack.go | 58 +- .../github.com/stretchr/objx/.codeclimate.yml | 8 + vendor/github.com/stretchr/objx/.travis.yml | 19 +- vendor/github.com/stretchr/objx/Gopkg.lock | 30 - vendor/github.com/stretchr/objx/Gopkg.toml | 8 - vendor/github.com/stretchr/objx/README.md | 2 +- vendor/github.com/stretchr/objx/Taskfile.yml | 52 +- vendor/github.com/stretchr/objx/accessors.go | 211 +- vendor/github.com/stretchr/objx/constants.go | 13 - .../github.com/stretchr/objx/conversions.go | 188 +- vendor/github.com/stretchr/objx/go.mod | 8 + vendor/github.com/stretchr/objx/go.sum | 8 + vendor/github.com/stretchr/objx/map.go | 44 +- .../github.com/stretchr/objx/type_specific.go | 346 + .../stretchr/objx/type_specific_codegen.go | 286 +- vendor/github.com/stretchr/objx/value.go | 106 + vendor/github.com/stretchr/testify/LICENSE | 2 +- .../testify/assert/assertion_compare.go | 394 + 
.../testify/assert/assertion_format.go | 215 +- .../testify/assert/assertion_forward.go | 412 +- .../testify/assert/assertion_order.go | 332 +- .../stretchr/testify/assert/assertions.go | 488 +- .../testify/assert/forward_assertions.go | 2 +- .../testify/assert/http_assertions.go | 25 +- .../github.com/stretchr/testify/mock/mock.go | 138 +- .../testify/require/forward_requirements.go | 2 +- .../stretchr/testify/require/require.go | 528 +- .../testify/require/require_forward.go | 412 +- .../stretchr/testify/require/requirements.go | 2 +- vendor/github.com/tinylib/msgp/LICENSE | 8 + .../tinylib/msgp/msgp/advise_linux.go | 24 + .../tinylib/msgp/msgp/advise_other.go | 17 + .../github.com/tinylib/msgp/msgp/circular.go | 39 + vendor/github.com/tinylib/msgp/msgp/defs.go | 142 + vendor/github.com/tinylib/msgp/msgp/edit.go | 242 + vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 + vendor/github.com/tinylib/msgp/msgp/errors.go | 317 + .../github.com/tinylib/msgp/msgp/extension.go | 549 + vendor/github.com/tinylib/msgp/msgp/file.go | 92 + .../github.com/tinylib/msgp/msgp/file_port.go | 47 + .../github.com/tinylib/msgp/msgp/integers.go | 174 + vendor/github.com/tinylib/msgp/msgp/json.go | 568 + .../tinylib/msgp/msgp/json_bytes.go | 363 + vendor/github.com/tinylib/msgp/msgp/number.go | 267 + vendor/github.com/tinylib/msgp/msgp/purego.go | 15 + vendor/github.com/tinylib/msgp/msgp/read.go | 1363 + .../tinylib/msgp/msgp/read_bytes.go | 1197 + vendor/github.com/tinylib/msgp/msgp/size.go | 38 + vendor/github.com/tinylib/msgp/msgp/unsafe.go | 36 + vendor/github.com/tinylib/msgp/msgp/write.go | 861 + .../tinylib/msgp/msgp/write_bytes.go | 411 + vendor/github.com/wiggin77/cfg/.gitignore | 12 + vendor/github.com/wiggin77/cfg/.travis.yml | 5 + vendor/github.com/wiggin77/cfg/LICENSE | 21 + vendor/github.com/wiggin77/cfg/README.md | 43 + vendor/github.com/wiggin77/cfg/config.go | 366 + vendor/github.com/wiggin77/cfg/go.mod | 5 + vendor/github.com/wiggin77/cfg/go.sum | 2 + vendor/github.com/wiggin77/cfg/ini/ini.go | 167 + vendor/github.com/wiggin77/cfg/ini/parser.go | 142 + vendor/github.com/wiggin77/cfg/ini/section.go | 109 + vendor/github.com/wiggin77/cfg/listener.go | 11 + vendor/github.com/wiggin77/cfg/nocopy.go | 11 + vendor/github.com/wiggin77/cfg/source.go | 58 + vendor/github.com/wiggin77/cfg/srcfile.go | 63 + vendor/github.com/wiggin77/cfg/srcmap.go | 78 + .../github.com/wiggin77/cfg/timeconv/parse.go | 108 + vendor/github.com/wiggin77/merror/.gitignore | 12 + vendor/github.com/wiggin77/merror/.travis.yml | 5 + vendor/github.com/wiggin77/merror/LICENSE | 21 + vendor/github.com/wiggin77/merror/README.md | 6 + vendor/github.com/wiggin77/merror/format.go | 43 + vendor/github.com/wiggin77/merror/go.mod | 5 + vendor/github.com/wiggin77/merror/go.sum | 10 + vendor/github.com/wiggin77/merror/merror.go | 120 + vendor/github.com/wiggin77/srslog/.gitignore | 1 + vendor/github.com/wiggin77/srslog/.travis.yml | 15 + .../wiggin77/srslog/CODE_OF_CONDUCT.md | 50 + vendor/github.com/wiggin77/srslog/LICENSE | 27 + vendor/github.com/wiggin77/srslog/README.md | 147 + .../github.com/wiggin77/srslog/constants.go | 68 + vendor/github.com/wiggin77/srslog/dialer.go | 104 + .../github.com/wiggin77/srslog/formatter.go | 58 + vendor/github.com/wiggin77/srslog/framer.go | 24 + vendor/github.com/wiggin77/srslog/go.mod | 3 + vendor/github.com/wiggin77/srslog/logger.go | 13 + vendor/github.com/wiggin77/srslog/net_conn.go | 76 + vendor/github.com/wiggin77/srslog/srslog.go | 125 + .../github.com/wiggin77/srslog/srslog_unix.go | 54 + 
vendor/github.com/wiggin77/srslog/writer.go | 201 + vendor/go.uber.org/atomic/.codecov.yml | 4 + vendor/go.uber.org/atomic/.gitignore | 3 +- vendor/go.uber.org/atomic/.travis.yml | 20 +- vendor/go.uber.org/atomic/CHANGELOG.md | 76 + vendor/go.uber.org/atomic/Makefile | 99 +- vendor/go.uber.org/atomic/README.md | 31 +- vendor/go.uber.org/atomic/atomic.go | 351 - vendor/go.uber.org/atomic/bool.go | 81 + vendor/go.uber.org/atomic/bool_ext.go | 53 + vendor/go.uber.org/atomic/doc.go | 23 + vendor/go.uber.org/atomic/duration.go | 82 + vendor/go.uber.org/atomic/duration_ext.go | 40 + vendor/go.uber.org/atomic/error.go | 48 +- vendor/go.uber.org/atomic/error_ext.go | 39 + vendor/go.uber.org/atomic/float64.go | 76 + vendor/go.uber.org/atomic/float64_ext.go | 47 + vendor/go.uber.org/atomic/gen.go | 26 + vendor/go.uber.org/atomic/glide.lock | 17 - vendor/go.uber.org/atomic/glide.yaml | 6 - vendor/go.uber.org/atomic/go.mod | 8 + vendor/go.uber.org/atomic/go.sum | 9 + vendor/go.uber.org/atomic/int32.go | 102 + vendor/go.uber.org/atomic/int64.go | 102 + vendor/go.uber.org/atomic/nocmp.go | 35 + vendor/go.uber.org/atomic/string.go | 41 +- vendor/go.uber.org/atomic/string_ext.go | 43 + vendor/go.uber.org/atomic/uint32.go | 102 + vendor/go.uber.org/atomic/uint64.go | 102 + vendor/go.uber.org/atomic/value.go | 31 + vendor/go.uber.org/multierr/.gitignore | 3 + vendor/go.uber.org/multierr/.travis.yml | 18 +- vendor/go.uber.org/multierr/CHANGELOG.md | 32 + vendor/go.uber.org/multierr/Makefile | 56 +- vendor/go.uber.org/multierr/README.md | 4 +- vendor/go.uber.org/multierr/error.go | 60 +- vendor/go.uber.org/multierr/glide.lock | 19 - vendor/go.uber.org/multierr/go.mod | 8 + vendor/go.uber.org/multierr/go.sum | 11 + vendor/go.uber.org/multierr/go113.go | 52 + vendor/go.uber.org/zap/.gitignore | 4 + vendor/go.uber.org/zap/.readme.tmpl | 5 +- vendor/go.uber.org/zap/.travis.yml | 20 +- vendor/go.uber.org/zap/CHANGELOG.md | 105 + vendor/go.uber.org/zap/FAQ.md | 1 + vendor/go.uber.org/zap/Makefile | 77 +- vendor/go.uber.org/zap/README.md | 68 +- vendor/go.uber.org/zap/buffer/buffer.go | 10 +- .../zap/{check_license.sh => checklicense.sh} | 0 vendor/go.uber.org/zap/config.go | 31 +- vendor/go.uber.org/zap/encoder.go | 4 + vendor/go.uber.org/zap/field.go | 231 +- vendor/go.uber.org/zap/glide.lock | 76 - vendor/go.uber.org/zap/glide.yaml | 3 +- vendor/go.uber.org/zap/go.mod | 13 + vendor/go.uber.org/zap/go.sum | 56 + vendor/go.uber.org/zap/logger.go | 47 +- vendor/go.uber.org/zap/options.go | 39 +- vendor/go.uber.org/zap/sink.go | 2 +- vendor/go.uber.org/zap/stacktrace.go | 47 +- .../zap/zapcore/console_encoder.go | 30 +- vendor/go.uber.org/zap/zapcore/encoder.go | 109 +- vendor/go.uber.org/zap/zapcore/entry.go | 17 +- vendor/go.uber.org/zap/zapcore/error.go | 5 - vendor/go.uber.org/zap/zapcore/field.go | 25 +- .../go.uber.org/zap/zapcore/increase_level.go | 66 + .../go.uber.org/zap/zapcore/json_encoder.go | 65 +- vendor/go.uber.org/zap/zapcore/marshaler.go | 8 + vendor/go.uber.org/zap/zapcore/sampler.go | 94 +- vendor/golang.org/x/image/AUTHORS | 3 + vendor/golang.org/x/image/CONTRIBUTORS | 3 + vendor/golang.org/x/image/LICENSE | 27 + vendor/golang.org/x/image/PATENTS | 22 + vendor/golang.org/x/image/bmp/reader.go | 219 + vendor/golang.org/x/image/bmp/writer.go | 262 + vendor/golang.org/x/image/ccitt/reader.go | 795 + vendor/golang.org/x/image/ccitt/table.go | 972 + vendor/golang.org/x/image/ccitt/writer.go | 102 + vendor/golang.org/x/image/tiff/buffer.go | 69 + vendor/golang.org/x/image/tiff/compress.go | 58 + 
vendor/golang.org/x/image/tiff/consts.go | 149 + vendor/golang.org/x/image/tiff/fuzz.go | 29 + vendor/golang.org/x/image/tiff/lzw/reader.go | 272 + vendor/golang.org/x/image/tiff/reader.go | 709 + vendor/golang.org/x/image/tiff/writer.go | 438 + vendor/golang.org/x/net/html/atom/gen.go | 712 - vendor/golang.org/x/net/html/const.go | 3 +- vendor/golang.org/x/net/html/foreign.go | 120 +- vendor/golang.org/x/net/html/node.go | 5 + vendor/golang.org/x/net/html/parse.go | 313 +- vendor/golang.org/x/net/html/render.go | 34 +- vendor/golang.org/x/net/html/token.go | 9 +- .../x/net/http2/client_conn_pool.go | 8 +- vendor/golang.org/x/net/http2/flow.go | 2 + .../golang.org/x/net/http2/hpack/huffman.go | 7 + vendor/golang.org/x/net/http2/http2.go | 13 +- vendor/golang.org/x/net/http2/pipe.go | 7 +- vendor/golang.org/x/net/http2/server.go | 39 +- vendor/golang.org/x/net/http2/transport.go | 184 +- .../x/net/http2/writesched_random.go | 9 +- vendor/golang.org/x/net/idna/tables11.0.0.go | 2 +- vendor/golang.org/x/net/idna/tables12.0.0.go | 4733 + vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 + .../x/net/internal/timeseries/timeseries.go | 6 +- .../sys/internal/unsafeheader/unsafeheader.go | 30 + vendor/golang.org/x/sys/unix/README.md | 15 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 2 +- vendor/golang.org/x/sys/unix/asm_darwin_386.s | 2 +- .../golang.org/x/sys/unix/asm_darwin_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 2 +- .../golang.org/x/sys/unix/asm_darwin_arm64.s | 2 +- .../x/sys/unix/asm_dragonfly_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 2 +- vendor/golang.org/x/sys/unix/asm_linux_386.s | 2 +- .../golang.org/x/sys/unix/asm_linux_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_linux_arm.s | 2 +- .../golang.org/x/sys/unix/asm_linux_arm64.s | 2 +- .../golang.org/x/sys/unix/asm_linux_mips64x.s | 2 +- .../golang.org/x/sys/unix/asm_linux_mipsx.s | 2 +- .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 2 +- .../golang.org/x/sys/unix/asm_linux_riscv64.s | 9 +- .../golang.org/x/sys/unix/asm_linux_s390x.s | 2 +- vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 2 +- .../x/sys/unix/asm_openbsd_mips64.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 2 +- .../golang.org/x/sys/unix/bluetooth_linux.go | 1 + vendor/golang.org/x/sys/unix/endian_big.go | 2 +- vendor/golang.org/x/sys/unix/endian_little.go | 2 +- .../x/sys/unix/errors_freebsd_386.go | 6 + .../x/sys/unix/errors_freebsd_amd64.go | 6 + .../x/sys/unix/errors_freebsd_arm64.go | 17 + vendor/golang.org/x/sys/unix/fcntl.go | 12 +- vendor/golang.org/x/sys/unix/fcntl_darwin.go | 6 + .../x/sys/unix/fcntl_linux_32bit.go | 4 +- vendor/golang.org/x/sys/unix/fdset.go | 29 + vendor/golang.org/x/sys/unix/gccgo.go | 2 - vendor/golang.org/x/sys/unix/gccgo_c.c | 6 + vendor/golang.org/x/sys/unix/ioctl.go | 9 + vendor/golang.org/x/sys/unix/mkall.sh | 32 +- vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - vendor/golang.org/x/sys/unix/mkerrors.sh | 58 +- vendor/golang.org/x/sys/unix/mkpost.go | 122 - 
vendor/golang.org/x/sys/unix/mksyscall.go | 407 - .../x/sys/unix/mksyscall_aix_ppc.go | 415 - .../x/sys/unix/mksyscall_aix_ppc64.go | 614 - .../x/sys/unix/mksyscall_solaris.go | 335 - .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 - vendor/golang.org/x/sys/unix/mksysnum.go | 190 - vendor/golang.org/x/sys/unix/ptrace_darwin.go | 11 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 11 + .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 2 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 36 +- .../x/sys/unix/sockcmsg_unix_other.go | 42 + vendor/golang.org/x/sys/unix/syscall.go | 43 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 16 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 57 +- .../x/sys/unix/syscall_darwin.1_12.go | 31 + .../x/sys/unix/syscall_darwin.1_13.go | 107 + .../golang.org/x/sys/unix/syscall_darwin.go | 165 +- .../x/sys/unix/syscall_darwin_386.go | 24 +- .../x/sys/unix/syscall_darwin_amd64.go | 24 +- .../x/sys/unix/syscall_darwin_arm.go | 29 +- .../x/sys/unix/syscall_darwin_arm64.go | 32 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 + .../x/sys/unix/syscall_dragonfly.go | 58 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 44 +- .../x/sys/unix/syscall_freebsd_386.go | 10 + .../x/sys/unix/syscall_freebsd_amd64.go | 10 + .../x/sys/unix/syscall_freebsd_arm.go | 6 + .../x/sys/unix/syscall_freebsd_arm64.go | 6 + .../golang.org/x/sys/unix/syscall_illumos.go | 90 + vendor/golang.org/x/sys/unix/syscall_linux.go | 513 +- .../x/sys/unix/syscall_linux_386.go | 9 +- .../x/sys/unix/syscall_linux_amd64.go | 6 +- .../x/sys/unix/syscall_linux_amd64_gc.go | 2 +- .../x/sys/unix/syscall_linux_arm.go | 11 +- .../x/sys/unix/syscall_linux_arm64.go | 32 +- .../golang.org/x/sys/unix/syscall_linux_gc.go | 2 +- .../x/sys/unix/syscall_linux_gc_386.go | 2 +- .../x/sys/unix/syscall_linux_gc_arm.go | 13 + .../x/sys/unix/syscall_linux_mips64x.go | 10 +- .../x/sys/unix/syscall_linux_mipsx.go | 6 +- .../x/sys/unix/syscall_linux_ppc64x.go | 6 +- .../x/sys/unix/syscall_linux_riscv64.go | 12 +- .../x/sys/unix/syscall_linux_s390x.go | 6 +- .../x/sys/unix/syscall_linux_sparc64.go | 6 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 53 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 54 +- .../x/sys/unix/syscall_openbsd_mips64.go | 35 + .../golang.org/x/sys/unix/syscall_solaris.go | 11 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 19 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 +- .../x/sys/unix/syscall_unix_gc_ppc64x.go | 2 +- vendor/golang.org/x/sys/unix/timestruct.go | 26 +- vendor/golang.org/x/sys/unix/types_aix.go | 237 - vendor/golang.org/x/sys/unix/types_darwin.go | 283 - .../golang.org/x/sys/unix/types_dragonfly.go | 263 - vendor/golang.org/x/sys/unix/types_freebsd.go | 400 - vendor/golang.org/x/sys/unix/types_netbsd.go | 290 - vendor/golang.org/x/sys/unix/types_openbsd.go | 283 - vendor/golang.org/x/sys/unix/types_solaris.go | 266 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 12 +- .../x/sys/unix/zerrors_aix_ppc64.go | 12 +- .../x/sys/unix/zerrors_darwin_386.go | 7 +- .../x/sys/unix/zerrors_darwin_amd64.go | 7 +- .../x/sys/unix/zerrors_darwin_arm.go | 7 +- .../x/sys/unix/zerrors_darwin_arm64.go | 7 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 139 +- .../x/sys/unix/zerrors_freebsd_386.go | 169 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 167 +- .../x/sys/unix/zerrors_freebsd_arm.go | 25 +- .../x/sys/unix/zerrors_freebsd_arm64.go | 168 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 2667 + .../x/sys/unix/zerrors_linux_386.go | 3274 +- 
.../x/sys/unix/zerrors_linux_amd64.go | 3274 +- .../x/sys/unix/zerrors_linux_arm.go | 3286 +- .../x/sys/unix/zerrors_linux_arm64.go | 3263 +- .../x/sys/unix/zerrors_linux_mips.go | 3278 +- .../x/sys/unix/zerrors_linux_mips64.go | 3278 +- .../x/sys/unix/zerrors_linux_mips64le.go | 3278 +- .../x/sys/unix/zerrors_linux_mipsle.go | 3278 +- .../x/sys/unix/zerrors_linux_ppc64.go | 3397 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 3397 +- .../x/sys/unix/zerrors_linux_riscv64.go | 3248 +- .../x/sys/unix/zerrors_linux_s390x.go | 3394 +- .../x/sys/unix/zerrors_linux_sparc64.go | 3375 +- .../x/sys/unix/zerrors_netbsd_386.go | 9 +- .../x/sys/unix/zerrors_netbsd_amd64.go | 9 +- .../x/sys/unix/zerrors_netbsd_arm.go | 9 +- .../x/sys/unix/zerrors_netbsd_arm64.go | 9 +- .../x/sys/unix/zerrors_openbsd_386.go | 24 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 13 +- .../x/sys/unix/zerrors_openbsd_arm.go | 18 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 8 + .../x/sys/unix/zerrors_openbsd_mips64.go | 1862 + .../x/sys/unix/zerrors_solaris_amd64.go | 25 +- ...acearm_linux.go => zptrace_armnn_linux.go} | 2 +- .../x/sys/unix/zptrace_linux_arm64.go | 17 + ...emips_linux.go => zptrace_mipsnn_linux.go} | 2 +- ...sle_linux.go => zptrace_mipsnnle_linux.go} | 2 +- ...trace386_linux.go => zptrace_x86_linux.go} | 2 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 2 +- .../x/sys/unix/zsyscall_darwin_386.1_13.go | 39 + .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_386.go | 396 +- .../x/sys/unix/zsyscall_darwin_386.s | 28 +- .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1810 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 39 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_amd64.go | 396 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 28 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1767 - .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 39 + .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm.go | 324 +- .../x/sys/unix/zsyscall_darwin_arm.s | 24 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 39 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm64.go | 338 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 22 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 58 +- .../x/sys/unix/zsyscall_freebsd_386.go | 68 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 66 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 56 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 58 +- .../x/sys/unix/zsyscall_illumos_amd64.go | 114 + ...l_darwin_386.1_11.go => zsyscall_linux.go} | 1383 +- .../x/sys/unix/zsyscall_linux_386.go | 1744 +- .../x/sys/unix/zsyscall_linux_amd64.go | 1744 +- .../x/sys/unix/zsyscall_linux_arm.go | 1744 +- .../x/sys/unix/zsyscall_linux_arm64.go | 1746 +- .../x/sys/unix/zsyscall_linux_mips.go | 1744 +- .../x/sys/unix/zsyscall_linux_mips64.go | 1744 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 1744 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 1744 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 1744 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 1744 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 1742 +- .../x/sys/unix/zsyscall_linux_s390x.go | 1744 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 1744 +- .../x/sys/unix/zsyscall_netbsd_386.go | 83 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 83 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 83 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 83 +- .../x/sys/unix/zsyscall_openbsd_386.go | 62 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 62 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 
62 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 62 +- ...m64.1_11.go => zsyscall_openbsd_mips64.go} | 519 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 5 +- .../x/sys/unix/zsysctl_openbsd_386.go | 3 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 + .../x/sys/unix/zsysctl_openbsd_arm.go | 1 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 279 + .../x/sys/unix/zsysnum_darwin_386.go | 1 + .../x/sys/unix/zsysnum_darwin_amd64.go | 1 + .../x/sys/unix/zsysnum_darwin_arm.go | 1 + .../x/sys/unix/zsysnum_darwin_arm64.go | 1 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 255 +- .../x/sys/unix/zsysnum_linux_386.go | 5 + .../x/sys/unix/zsysnum_linux_amd64.go | 5 + .../x/sys/unix/zsysnum_linux_arm.go | 5 + .../x/sys/unix/zsysnum_linux_arm64.go | 6 + .../x/sys/unix/zsysnum_linux_mips.go | 6 + .../x/sys/unix/zsysnum_linux_mips64.go | 6 + .../x/sys/unix/zsysnum_linux_mips64le.go | 6 + .../x/sys/unix/zsysnum_linux_mipsle.go | 6 + .../x/sys/unix/zsysnum_linux_ppc64.go | 5 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 5 + .../x/sys/unix/zsysnum_linux_riscv64.go | 5 + .../x/sys/unix/zsysnum_linux_s390x.go | 5 + .../x/sys/unix/zsysnum_linux_sparc64.go | 5 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 220 + .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 + .../x/sys/unix/ztypes_darwin_386.go | 33 +- .../x/sys/unix/ztypes_darwin_amd64.go | 44 +- .../x/sys/unix/ztypes_darwin_arm.go | 40 +- .../x/sys/unix/ztypes_darwin_arm64.go | 44 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 57 +- .../x/sys/unix/ztypes_freebsd_386.go | 52 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 37 +- .../x/sys/unix/ztypes_freebsd_arm.go | 25 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 67 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 3224 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 2035 +- .../x/sys/unix/ztypes_linux_amd64.go | 2039 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2039 +- .../x/sys/unix/ztypes_linux_arm64.go | 2039 +- .../x/sys/unix/ztypes_linux_mips.go | 2039 +- .../x/sys/unix/ztypes_linux_mips64.go | 2040 +- .../x/sys/unix/ztypes_linux_mips64le.go | 2040 +- .../x/sys/unix/ztypes_linux_mipsle.go | 2039 +- .../x/sys/unix/ztypes_linux_ppc64.go | 2039 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 2039 +- .../x/sys/unix/ztypes_linux_riscv64.go | 2039 +- .../x/sys/unix/ztypes_linux_s390x.go | 2039 +- .../x/sys/unix/ztypes_linux_sparc64.go | 2039 +- .../x/sys/unix/ztypes_netbsd_386.go | 33 + .../x/sys/unix/ztypes_netbsd_amd64.go | 34 + .../x/sys/unix/ztypes_netbsd_arm.go | 33 + .../x/sys/unix/ztypes_netbsd_arm64.go | 34 + .../x/sys/unix/ztypes_openbsd_386.go | 1 + .../x/sys/unix/ztypes_openbsd_amd64.go | 1 + .../x/sys/unix/ztypes_openbsd_arm.go | 1 + .../x/sys/unix/ztypes_openbsd_arm64.go | 1 + .../x/sys/unix/ztypes_openbsd_mips64.go | 566 + .../x/sys/unix/ztypes_solaris_amd64.go | 39 +- vendor/golang.org/x/text/collate/collate.go | 403 - vendor/golang.org/x/text/collate/index.go | 32 - .../golang.org/x/text/collate/maketables.go | 553 - vendor/golang.org/x/text/collate/option.go | 239 - vendor/golang.org/x/text/collate/sort.go | 81 - vendor/golang.org/x/text/collate/tables.go | 73789 ---------------- .../x/text/internal/colltab/collelem.go | 371 - .../x/text/internal/colltab/colltab.go | 105 - .../x/text/internal/colltab/contract.go | 145 - .../x/text/internal/colltab/iter.go | 178 - .../x/text/internal/colltab/numeric.go | 236 - .../x/text/internal/colltab/table.go | 275 - .../x/text/internal/colltab/trie.go | 159 - .../x/text/internal/colltab/weighter.go | 31 - 
vendor/golang.org/x/text/internal/gen/code.go | 375 - vendor/golang.org/x/text/internal/gen/gen.go | 347 - .../x/text/internal/language/compact/gen.go | 64 - .../internal/language/compact/gen_index.go | 113 - .../internal/language/compact/gen_parents.go | 54 - .../text/internal/language/compact/tables.go | 36 +- .../x/text/internal/language/gen.go | 1520 - .../x/text/internal/language/gen_common.go | 20 - .../x/text/internal/language/tables.go | 4089 +- .../x/text/internal/triegen/compact.go | 58 - .../x/text/internal/triegen/print.go | 251 - .../x/text/internal/triegen/triegen.go | 494 - vendor/golang.org/x/text/internal/ucd/ucd.go | 371 - vendor/golang.org/x/text/language/gen.go | 305 - vendor/golang.org/x/text/language/tables.go | 78 +- .../golang.org/x/text/transform/transform.go | 6 +- vendor/golang.org/x/text/unicode/bidi/core.go | 8 +- vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - .../x/text/unicode/bidi/tables11.0.0.go | 2 +- .../x/text/unicode/bidi/tables12.0.0.go | 1923 + .../x/text/unicode/bidi/tables13.0.0.go | 1955 + vendor/golang.org/x/text/unicode/cldr/base.go | 105 - vendor/golang.org/x/text/unicode/cldr/cldr.go | 137 - .../golang.org/x/text/unicode/cldr/collate.go | 359 - .../golang.org/x/text/unicode/cldr/decode.go | 172 - .../golang.org/x/text/unicode/cldr/makexml.go | 400 - .../golang.org/x/text/unicode/cldr/resolve.go | 602 - .../golang.org/x/text/unicode/cldr/slice.go | 144 - vendor/golang.org/x/text/unicode/cldr/xml.go | 1494 - .../x/text/unicode/norm/maketables.go | 986 - .../x/text/unicode/norm/tables11.0.0.go | 2 +- .../x/text/unicode/norm/tables12.0.0.go | 7710 ++ .../x/text/unicode/norm/tables13.0.0.go | 7760 ++ .../golang.org/x/text/unicode/norm/triegen.go | 117 - .../x/text/unicode/rangetable/gen.go | 115 - .../x/text/unicode/rangetable/merge.go | 260 - .../x/text/unicode/rangetable/rangetable.go | 70 - .../x/text/unicode/rangetable/tables10.0.0.go | 6378 -- .../x/text/unicode/rangetable/tables11.0.0.go | 7029 -- .../x/text/unicode/rangetable/tables9.0.0.go | 5737 -- .../googleapis/rpc/status/status.pb.go | 279 +- vendor/google.golang.org/grpc/.travis.yml | 33 +- .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 5 +- vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/MAINTAINERS.md | 27 + vendor/google.golang.org/grpc/Makefile | 4 + vendor/google.golang.org/grpc/README.md | 132 +- .../grpc/attributes/attributes.go | 79 + vendor/google.golang.org/grpc/backoff.go | 23 + .../google.golang.org/grpc/backoff/backoff.go | 52 + vendor/google.golang.org/grpc/balancer.go | 391 - .../grpc/balancer/balancer.go | 174 +- .../grpc/balancer/base/balancer.go | 129 +- .../grpc/balancer/base/base.go | 25 +- .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/roundrobin/roundrobin.go | 18 +- .../grpc/balancer_conn_wrappers.go | 158 +- .../grpc/balancer_v1_wrapper.go | 334 - .../grpc_binarylog_v1/binarylog.pb.go | 1369 +- vendor/google.golang.org/grpc/clientconn.go | 535 +- vendor/google.golang.org/grpc/codegen.sh | 0 vendor/google.golang.org/grpc/codes/codes.go | 46 + .../grpc/connectivity/connectivity.go | 16 +- .../grpc/credentials/credentials.go | 318 +- .../grpc/credentials/{tls13.go => go12.go} | 0 .../google.golang.org/grpc/credentials/tls.go | 233 + vendor/google.golang.org/grpc/dialoptions.go | 191 +- vendor/google.golang.org/grpc/doc.go | 2 + .../grpc/encoding/encoding.go | 14 +- 
.../grpc/encoding/proto/proto.go | 66 +- vendor/google.golang.org/grpc/go.mod | 20 +- vendor/google.golang.org/grpc/go.sum | 76 +- .../grpc/grpclog/component.go | 117 + .../google.golang.org/grpc/grpclog/grpclog.go | 44 +- .../google.golang.org/grpc/grpclog/logger.go | 4 +- .../grpc/grpclog/loggerv2.go | 28 +- .../google.golang.org/grpc/health/client.go | 42 +- .../grpc/health/grpc_health_v1/health.pb.go | 512 +- .../health/grpc_health_v1/health_grpc.pb.go | 193 + .../transport/log.go => health/logging.go} | 27 +- .../grpc/health/regenerate.sh | 33 - .../google.golang.org/grpc/health/server.go | 16 +- vendor/google.golang.org/grpc/install_gae.sh | 2 +- vendor/google.golang.org/grpc/interceptor.go | 36 +- .../grpc/internal/backoff/backoff.go | 27 +- .../grpc/internal/binarylog/binarylog.go | 19 +- .../grpc/internal/binarylog/env_config.go | 8 +- .../grpc/internal/binarylog/method_logger.go | 15 +- .../grpc/internal/binarylog/regenerate.sh | 33 - .../grpc/internal/binarylog/sink.go | 73 +- .../grpc/internal/binarylog/util.go | 41 - .../grpc/internal/buffer/unbounded.go | 85 + .../grpc/internal/channelz/funcs.go | 18 +- .../grpc/internal/channelz/logging.go | 102 + .../grpc/internal/channelz/types.go | 33 +- .../grpc/internal/channelz/types_nonlinux.go | 4 +- .../grpc/internal/credentials/spiffe.go | 77 + .../internal/credentials/spiffe_appengine.go | 31 + .../credentials}/syscallconn.go | 3 +- .../credentials}/syscallconn_appengine.go | 2 +- .../grpc/internal/credentials/util.go | 50 + .../grpc/internal/envconfig/envconfig.go | 7 +- .../grpc/internal/grpclog/grpclog.go | 126 + .../grpc/internal/grpclog/prefixLogger.go | 81 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 84 + .../grpc/internal/grpcutil/target.go | 68 + .../grpc/internal/internal.go | 38 +- .../grpc/internal/resolver/config_selector.go | 93 + .../resolver/dns/dns_resolver.go | 221 +- .../grpc/internal/resolver/dns/go113.go | 33 + .../resolver/passthrough/passthrough.go | 4 +- .../grpc/internal/resolver/unix/unix.go | 49 + .../internal/serviceconfig/serviceconfig.go | 162 + .../grpc/internal/status/status.go | 162 + .../grpc/internal/syscall/syscall_linux.go | 6 +- .../grpc/internal/syscall/syscall_nonlinux.go | 5 +- .../grpc/internal/transport/controlbuf.go | 88 +- .../grpc/internal/transport/handler_server.go | 86 +- .../grpc/internal/transport/http2_client.go | 331 +- .../grpc/internal/transport/http2_server.go | 231 +- .../grpc/internal/transport/http_util.go | 107 +- .../transport/networktype/networktype.go | 46 + .../grpc/{ => internal/transport}/proxy.go | 52 +- .../grpc/internal/transport/transport.go | 86 +- .../grpc/naming/dns_resolver.go | 293 - .../google.golang.org/grpc/naming/naming.go | 68 - .../google.golang.org/grpc/picker_wrapper.go | 140 +- vendor/google.golang.org/grpc/pickfirst.go | 88 +- vendor/google.golang.org/grpc/preloader.go | 5 +- .../grpc/reflection/README.md | 18 + .../grpc_reflection_v1alpha/reflection.pb.go | 953 + .../grpc_reflection_v1alpha/reflection.proto | 138 + .../reflection_grpc.pb.go | 131 + .../grpc/reflection/serverreflection.go | 486 + vendor/google.golang.org/grpc/regenerate.sh | 139 + .../grpc/resolver/resolver.go | 99 +- .../grpc/resolver_conn_wrapper.go | 200 +- vendor/google.golang.org/grpc/rpc_util.go | 240 +- vendor/google.golang.org/grpc/server.go | 552 +- .../google.golang.org/grpc/service_config.go | 221 +- .../grpc/serviceconfig/serviceconfig.go | 26 +- 
vendor/google.golang.org/grpc/stats/stats.go | 24 +- .../google.golang.org/grpc/status/status.go | 119 +- vendor/google.golang.org/grpc/stream.go | 108 +- vendor/google.golang.org/grpc/tap/tap.go | 7 +- vendor/google.golang.org/grpc/trace.go | 3 - vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 186 +- vendor/google.golang.org/protobuf/AUTHORS | 3 + .../google.golang.org/protobuf/CONTRIBUTORS | 3 + vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/prototext/decode.go | 791 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 433 + .../protobuf/encoding/protowire/wire.go | 538 + .../protobuf/internal/descfmt/stringer.go | 316 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 61 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 258 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 267 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 39 + .../protobuf/internal/errors/is_go113.go | 12 + .../protobuf/internal/fieldsort/fieldsort.go | 40 + .../protobuf/internal/filedesc/build.go | 155 + .../protobuf/internal/filedesc/desc.go | 614 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 282 + .../internal/filedesc/desc_list_gen.go | 345 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 9 + .../internal/flags/proto_legacy_enable.go | 9 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 828 + .../protobuf/internal/impl/codec_gen.go | 5637 ++ .../protobuf/internal/impl/codec_map.go | 389 + .../protobuf/internal/impl/codec_map_go111.go | 37 + .../protobuf/internal/impl/codec_map_go112.go | 11 + .../protobuf/internal/impl/codec_message.go | 159 + .../internal/impl/codec_messageset.go | 120 + .../protobuf/internal/impl/codec_reflect.go | 209 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 17 + .../protobuf/internal/impl/convert.go | 467 + .../protobuf/internal/impl/convert_list.go | 141 + 
.../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 274 + .../protobuf/internal/impl/encode.go | 199 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 175 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 502 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 215 + .../protobuf/internal/impl/message_reflect.go | 364 + .../internal/impl/message_reflect_field.go | 466 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 177 + .../protobuf/internal/impl/pointer_unsafe.go | 173 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/mapsort/mapsort.go | 43 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 27 + .../protobuf/internal/strs/strings_unsafe.go | 94 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 274 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 346 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 154 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 88 + .../google.golang.org/protobuf/proto/proto.go | 34 + .../protobuf/proto/proto_methods.go | 19 + .../protobuf/proto/proto_reflect.go | 19 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protoreflect/methods.go | 77 + .../protobuf/reflect/protoreflect/proto.go | 504 + .../protobuf/reflect/protoreflect/source.go | 52 + .../protobuf/reflect/protoreflect/type.go | 631 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 59 + .../reflect/protoreflect/value_union.go | 411 + .../reflect/protoreflect/value_unsafe.go | 98 + .../reflect/protoregistry/registry.go | 800 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 167 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../types/descriptorpb/descriptor.pb.go | 4040 + .../protobuf/types/known/anypb/any.pb.go | 494 + .../types/known/durationpb/duration.pb.go | 379 + .../protobuf/types/known/emptypb/empty.pb.go | 168 + .../types/known/timestamppb/timestamp.pb.go | 381 + vendor/gopkg.in/asn1-ber.v1/.travis.yml | 36 - .../natefinch/lumberjack.v2/.travis.yml | 6 + .../natefinch/lumberjack.v2/README.md | 7 +- .../natefinch/lumberjack.v2/lumberjack.go | 2 +- vendor/gopkg.in/yaml.v2/.travis.yml | 19 +- vendor/gopkg.in/yaml.v2/apic.go | 5 + vendor/gopkg.in/yaml.v2/decode.go | 48 +- vendor/gopkg.in/yaml.v2/go.mod | 8 +- vendor/gopkg.in/yaml.v2/resolve.go | 2 +- vendor/gopkg.in/yaml.v2/scannerc.go | 123 +- vendor/gopkg.in/yaml.v2/yaml.go | 16 +- vendor/gopkg.in/yaml.v2/yamlh.go | 1 + vendor/gopkg.in/yaml.v3/.travis.yml | 17 + vendor/gopkg.in/yaml.v3/LICENSE | 50 + 
vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 + vendor/gopkg.in/yaml.v3/decode.go | 948 + vendor/gopkg.in/yaml.v3/emitterc.go | 2022 + vendor/gopkg.in/yaml.v3/encode.go | 572 + vendor/gopkg.in/yaml.v3/go.mod | 5 + vendor/gopkg.in/yaml.v3/parserc.go | 1249 + vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3028 + vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 693 + vendor/gopkg.in/yaml.v3/yamlh.go | 807 + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/modules.txt | 228 + 1353 files changed, 177271 insertions(+), 230951 deletions(-) delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 vendor/github.com/blang/semver/go.mod create mode 100644 vendor/github.com/disintegration/imaging/.travis.yml create mode 100644 vendor/github.com/disintegration/imaging/LICENSE create mode 100644 vendor/github.com/disintegration/imaging/README.md create mode 100644 vendor/github.com/disintegration/imaging/adjust.go create mode 100644 vendor/github.com/disintegration/imaging/convolution.go create mode 100644 vendor/github.com/disintegration/imaging/doc.go create mode 100644 vendor/github.com/disintegration/imaging/effects.go create mode 100644 vendor/github.com/disintegration/imaging/go.mod create mode 100644 vendor/github.com/disintegration/imaging/go.sum create mode 100644 vendor/github.com/disintegration/imaging/histogram.go create mode 100644 vendor/github.com/disintegration/imaging/io.go create mode 100644 vendor/github.com/disintegration/imaging/resize.go create mode 100644 vendor/github.com/disintegration/imaging/scanner.go create mode 100644 vendor/github.com/disintegration/imaging/tools.go create mode 100644 vendor/github.com/disintegration/imaging/transform.go create mode 100644 vendor/github.com/disintegration/imaging/utils.go create mode 100644 vendor/github.com/fatih/color/LICENSE.md create mode 100644 vendor/github.com/fatih/color/README.md create mode 100644 vendor/github.com/fatih/color/color.go create mode 100644 vendor/github.com/fatih/color/doc.go create mode 100644 vendor/github.com/fatih/color/go.mod create mode 100644 vendor/github.com/fatih/color/go.sum create mode 100644 vendor/github.com/francoispqt/gojay/.gitignore create mode 100644 vendor/github.com/francoispqt/gojay/.travis.yml create mode 100644 vendor/github.com/francoispqt/gojay/Gopkg.lock create mode 100644 vendor/github.com/francoispqt/gojay/Gopkg.toml create mode 100644 vendor/github.com/francoispqt/gojay/LICENSE create mode 100644 vendor/github.com/francoispqt/gojay/Makefile create mode 100644 vendor/github.com/francoispqt/gojay/README.md create mode 100644 vendor/github.com/francoispqt/gojay/decode.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_array.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_bool.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_embedded_json.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_interface.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_number.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_number_float.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_number_int.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_number_uint.go create mode 100644 
vendor/github.com/francoispqt/gojay/decode_object.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_pool.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_slice.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_sqlnull.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_stream.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_stream_pool.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_string.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_string_unicode.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_time.go create mode 100644 vendor/github.com/francoispqt/gojay/decode_unsafe.go create mode 100644 vendor/github.com/francoispqt/gojay/encode.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_array.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_bool.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_builder.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_embedded_json.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_interface.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_null.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_number.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_number_float.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_number_int.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_number_uint.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_object.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_pool.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_slice.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_sqlnull.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_stream.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_stream_pool.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_string.go create mode 100644 vendor/github.com/francoispqt/gojay/encode_time.go create mode 100644 vendor/github.com/francoispqt/gojay/errors.go create mode 100644 vendor/github.com/francoispqt/gojay/go.mod create mode 100644 vendor/github.com/francoispqt/gojay/go.sum create mode 100644 vendor/github.com/francoispqt/gojay/gojay.go create mode 100644 vendor/github.com/francoispqt/gojay/gojay.png create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/go.mod create mode 100644 vendor/github.com/fsnotify/fsnotify/go.sum create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/LICENSE (100%) rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/README.md (100%) rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/ber.go (69%) rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/content_int.go (87%) create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/go.mod rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/header.go (75%) rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/identifier.go (100%) rename vendor/{gopkg.in/asn1-ber.v1 => github.com/go-asn1-ber/asn1-ber}/length.go (85%) create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/real.go rename vendor/{gopkg.in/asn1-ber.v1 => 
github.com/go-asn1-ber/asn1-ber}/util.go (93%) create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_unix.go create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_windows.go create mode 100644 vendor/github.com/hashicorp/go-hclog/exclude.go create mode 100644 vendor/github.com/hashicorp/go-hclog/interceptlogger.go create mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go create mode 100644 
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto create mode 100644 vendor/github.com/mattermost/ldap/go.mod create mode 100644 vendor/github.com/mattermost/ldap/go.sum create mode 100644 vendor/github.com/mattermost/ldap/request.go create mode 100644 vendor/github.com/mattermost/logr/.gitignore create mode 100644 vendor/github.com/mattermost/logr/.travis.yml create mode 100644 vendor/github.com/mattermost/logr/LICENSE create mode 100644 vendor/github.com/mattermost/logr/README.md create mode 100644 vendor/github.com/mattermost/logr/config.go create mode 100644 vendor/github.com/mattermost/logr/const.go create mode 100644 vendor/github.com/mattermost/logr/filter.go create mode 100644 vendor/github.com/mattermost/logr/format/json.go create mode 100644 vendor/github.com/mattermost/logr/format/plain.go create mode 100644 vendor/github.com/mattermost/logr/formatter.go create mode 100644 vendor/github.com/mattermost/logr/go.mod create mode 100644 vendor/github.com/mattermost/logr/go.sum create mode 100644 vendor/github.com/mattermost/logr/levelcache.go create mode 100644 vendor/github.com/mattermost/logr/levelcustom.go create mode 100644 vendor/github.com/mattermost/logr/levelstd.go create mode 100644 vendor/github.com/mattermost/logr/logger.go create mode 100644 vendor/github.com/mattermost/logr/logr.go create mode 100644 vendor/github.com/mattermost/logr/logrec.go create mode 100644 vendor/github.com/mattermost/logr/metrics.go create mode 100644 vendor/github.com/mattermost/logr/target.go create mode 100644 vendor/github.com/mattermost/logr/target/file.go create mode 100644 vendor/github.com/mattermost/logr/target/syslog.go create mode 100644 vendor/github.com/mattermost/logr/target/writer.go create mode 100644 vendor/github.com/mattermost/logr/timeout.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/mlog/default.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/mlog/global.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/mlog/log.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/channel_search.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/command_args.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/file_info.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/group.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/ldap.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/migration.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/push_notification.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/role.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/saml.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/search_params.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/session.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/system.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/team_member.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/team_search.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/user_count.go delete mode 100644 
vendor/github.com/mattermost/mattermost-server/model/websocket_client.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/model/websocket_message.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/plugin/health_check.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/plugin/helpers.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/plugin/helpers_bots.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/plugin/helpers_kv.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/plugin/plugintest/helpers.go delete mode 100755 vendor/github.com/mattermost/mattermost-server/scripts/license-check.sh delete mode 100644 vendor/github.com/mattermost/mattermost-server/utils/api.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/utils/extract.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/utils/lru.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/utils/path.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/utils/test_files_compiler.go delete mode 100644 vendor/github.com/mattermost/mattermost-server/utils/utils.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/LICENSE.txt (99%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/NOTICE.txt (85%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/account_migration.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cloud.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cluster.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/compliance.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/data_retention.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/ldap.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/message_export.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/metrics.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/mfa.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/notification.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/oauthproviders.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/einterfaces/saml.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/mlog/stdlog.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/mlog/sugar.go (96%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem rename vendor/github.com/mattermost/mattermost-server/{ => v5}/mlog/testing.go (68%) rename 
vendor/github.com/mattermost/mattermost-server/{ => v5}/model/access.go (92%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/analytics_row.go (52%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/audit.go (92%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/audits.go (75%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/authorize.go (96%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/bot.go (82%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/bot_default_image.go (99%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/builtin.go (86%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/bundle_info.go (86%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel.go (71%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_count.go (95%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_data.go (93%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_list.go (87%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_member.go (83%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_member_history.go (54%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_member_history_result.go (53%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_mentions.go (92%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_stats.go (61%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/channel_view.go (87%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/client4.go (77%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/cloud.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/cluster_discovery.go (95%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/cluster_info.go (78%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/cluster_message.go (51%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/cluster_stats.go (73%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/command.go (72%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/command_response.go (57%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/command_webhook.go (81%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/compliance.go (64%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/compliance_post.go (59%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/config.go (50%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/data_retention_policy.go 
(62%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/emoji.go (94%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/emoji_data.go (99%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/emoji_search.go (77%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/feature_flags.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/file.go (86%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/file_info_list.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/file_info_search_results.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/gitlab.go (74%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/group.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/group_member.go (83%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/group_syncable.go (88%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/incoming_webhook.go (96%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/initial_load.go (67%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/integration_action.go (84%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/job.go (77%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/license.go (65%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/link_metadata.go (95%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/manifest.go (56%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/mention_map.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/message_export.go (80%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/migration.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/oauth.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/outgoing_webhook.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/permission.go (50%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/plugin_event_data.go (100%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/plugin_key_value.go (90%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/plugin_kvset_options.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/plugin_status.go (100%) rename vendor/github.com/mattermost/mattermost-server/{plugin/valid.go => v5/model/plugin_valid.go} (77%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/plugins_response.go (91%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/post.go (67%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/post_embed.go (85%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/post_list.go (96%) rename 
vendor/github.com/mattermost/mattermost-server/{ => v5}/model/post_metadata.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/post_search_results.go (90%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/preference.go (75%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/preferences.go (82%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/push_response.go (87%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/reaction.go (89%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/role.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/saml.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/scheduled_task.go (92%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/scheme.go (94%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/security_bulletin.go (71%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/session.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/slack_attachment.go (96%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/slack_compatibility.go (86%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/status.go (73%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/suggest_command.go (91%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/switch_request.go (81%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/system.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/team.go (89%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/team_stats.go (80%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/terms_of_service.go (94%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/thread.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/token.go (85%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user.go (85%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user_access_token.go (90%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user_access_token_search.go (84%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user_autocomplete.go (88%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user_get.go (60%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user_search.go (56%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/user_terms_of_service.go (88%) rename 
vendor/github.com/mattermost/mattermost-server/{ => v5}/model/users_stats.go (75%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/utils.go (79%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/version.go (91%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/model/websocket_request.go (52%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/api.go (65%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/api_timer_layer_generated.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/client.go (94%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/client_rpc.go (64%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/client_rpc_generated.go (83%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/context.go (95%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/doc.go (100%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/environment.go (62%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/hclog_adapter.go (77%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/health_check.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_bots.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_config.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_kv.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_plugin.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/hijack.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/hooks.go (77%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks_timer_layer_generated.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/http.go (70%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/io_rpc.go (100%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/plugintest/api.go (82%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/plugintest/doc.go (60%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/helpers.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/plugintest/hooks.go (91%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/plugintest/mock/mock.go (88%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/plugin/stringifier.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/plugin/supervisor.go (75%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/services/timezones/default.go (99%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/services/timezones/timezones.go (93%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/utils/api.go create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/utils/archive.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/authorization.go (98%) rename vendor/github.com/mattermost/mattermost-server/{ => 
v5}/utils/backoff.go (84%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/emoji.go (55%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/file.go (94%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/fileutils/fileutils.go (65%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/hash.go (63%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/html.go (75%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/i18n.go (80%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/jsonutils/json.go (96%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/license.go (69%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/logger.go (57%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/autolink.go (95%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/block_quote.go (92%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/blocks.go (86%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/document.go (76%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/fenced_code.go (95%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/html.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/html_entities.go (99%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/indented_code.go (94%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/inlines.go (98%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/inspect.go (94%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/lines.go (69%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/links.go (91%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/list.go (98%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/markdown.go (96%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/paragraph.go (93%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/markdown/reference_definition.go (94%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/merge.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/password.go (86%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/policies-roles-mapping.json (99%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/random.go (86%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/subpath.go (81%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/utils/test_files_compiler.go rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/textgeneration.go (97%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/time.go (93%) rename vendor/github.com/mattermost/mattermost-server/{ => v5}/utils/urlencode.go (86%) create mode 100644 vendor/github.com/mattermost/mattermost-server/v5/utils/utils.go create mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 
vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/go.mod create mode 100644 vendor/github.com/mattn/go-colorable/go.sum create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.mod create mode 100644 vendor/github.com/mattn/go-isatty/go.sum create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mattn/go-isatty/renovate.json delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing_go19.go delete mode 100644 vendor/github.com/oklog/run/.travis.yml create mode 100644 vendor/github.com/oklog/run/actors.go create mode 100644 vendor/github.com/oklog/run/go.mod create mode 100644 vendor/github.com/pborman/uuid/go.mod create mode 100644 vendor/github.com/pborman/uuid/go.sum delete mode 100644 vendor/github.com/pelletier/go-toml/.travis.yml create mode 100644 vendor/github.com/pelletier/go-toml/Makefile delete mode 100644 vendor/github.com/pelletier/go-toml/appveyor.yml create mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml delete mode 100644 vendor/github.com/pelletier/go-toml/benchmark.json mode change 100755 => 100644 vendor/github.com/pelletier/go-toml/benchmark.sh delete mode 100644 vendor/github.com/pelletier/go-toml/benchmark.toml delete mode 100644 vendor/github.com/pelletier/go-toml/benchmark.yml mode change 100755 => 100644 vendor/github.com/pelletier/go-toml/fuzz.sh create mode 100644 vendor/github.com/pelletier/go-toml/fuzzit.sh create mode 100644 vendor/github.com/pelletier/go-toml/localtime.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml create mode 100644 vendor/github.com/philhofer/fwd/LICENSE.md create mode 100644 vendor/github.com/philhofer/fwd/README.md create mode 100644 vendor/github.com/philhofer/fwd/reader.go create mode 100644 vendor/github.com/philhofer/fwd/writer.go create mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go create mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go create mode 100644 vendor/github.com/pkg/errors/Makefile create mode 100644 vendor/github.com/pkg/errors/go113.go delete mode 100644 vendor/github.com/stretchr/objx/Gopkg.lock delete mode 100644 vendor/github.com/stretchr/objx/Gopkg.toml delete mode 100644 vendor/github.com/stretchr/objx/constants.go create mode 100644 vendor/github.com/stretchr/objx/go.mod create mode 100644 vendor/github.com/stretchr/objx/go.sum create mode 100644 vendor/github.com/stretchr/objx/type_specific.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare.go create mode 100644 vendor/github.com/tinylib/msgp/LICENSE 
create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_linux.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_other.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/circular.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file_port.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/purego.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/unsafe.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go create mode 100644 vendor/github.com/wiggin77/cfg/.gitignore create mode 100644 vendor/github.com/wiggin77/cfg/.travis.yml create mode 100644 vendor/github.com/wiggin77/cfg/LICENSE create mode 100644 vendor/github.com/wiggin77/cfg/README.md create mode 100644 vendor/github.com/wiggin77/cfg/config.go create mode 100644 vendor/github.com/wiggin77/cfg/go.mod create mode 100644 vendor/github.com/wiggin77/cfg/go.sum create mode 100644 vendor/github.com/wiggin77/cfg/ini/ini.go create mode 100644 vendor/github.com/wiggin77/cfg/ini/parser.go create mode 100644 vendor/github.com/wiggin77/cfg/ini/section.go create mode 100644 vendor/github.com/wiggin77/cfg/listener.go create mode 100644 vendor/github.com/wiggin77/cfg/nocopy.go create mode 100644 vendor/github.com/wiggin77/cfg/source.go create mode 100644 vendor/github.com/wiggin77/cfg/srcfile.go create mode 100644 vendor/github.com/wiggin77/cfg/srcmap.go create mode 100644 vendor/github.com/wiggin77/cfg/timeconv/parse.go create mode 100644 vendor/github.com/wiggin77/merror/.gitignore create mode 100644 vendor/github.com/wiggin77/merror/.travis.yml create mode 100644 vendor/github.com/wiggin77/merror/LICENSE create mode 100644 vendor/github.com/wiggin77/merror/README.md create mode 100644 vendor/github.com/wiggin77/merror/format.go create mode 100644 vendor/github.com/wiggin77/merror/go.mod create mode 100644 vendor/github.com/wiggin77/merror/go.sum create mode 100644 vendor/github.com/wiggin77/merror/merror.go create mode 100644 vendor/github.com/wiggin77/srslog/.gitignore create mode 100644 vendor/github.com/wiggin77/srslog/.travis.yml create mode 100644 vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/wiggin77/srslog/LICENSE create mode 100644 vendor/github.com/wiggin77/srslog/README.md create mode 100644 vendor/github.com/wiggin77/srslog/constants.go create mode 100644 vendor/github.com/wiggin77/srslog/dialer.go create mode 100644 vendor/github.com/wiggin77/srslog/formatter.go create mode 100644 vendor/github.com/wiggin77/srslog/framer.go create mode 100644 vendor/github.com/wiggin77/srslog/go.mod create mode 100644 
vendor/github.com/wiggin77/srslog/logger.go create mode 100644 vendor/github.com/wiggin77/srslog/net_conn.go create mode 100644 vendor/github.com/wiggin77/srslog/srslog.go create mode 100644 vendor/github.com/wiggin77/srslog/srslog_unix.go create mode 100644 vendor/github.com/wiggin77/srslog/writer.go create mode 100644 vendor/go.uber.org/atomic/CHANGELOG.md delete mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/bool.go create mode 100644 vendor/go.uber.org/atomic/bool_ext.go create mode 100644 vendor/go.uber.org/atomic/doc.go create mode 100644 vendor/go.uber.org/atomic/duration.go create mode 100644 vendor/go.uber.org/atomic/duration_ext.go create mode 100644 vendor/go.uber.org/atomic/error_ext.go create mode 100644 vendor/go.uber.org/atomic/float64.go create mode 100644 vendor/go.uber.org/atomic/float64_ext.go create mode 100644 vendor/go.uber.org/atomic/gen.go delete mode 100644 vendor/go.uber.org/atomic/glide.lock delete mode 100644 vendor/go.uber.org/atomic/glide.yaml create mode 100644 vendor/go.uber.org/atomic/go.mod create mode 100644 vendor/go.uber.org/atomic/go.sum create mode 100644 vendor/go.uber.org/atomic/int32.go create mode 100644 vendor/go.uber.org/atomic/int64.go create mode 100644 vendor/go.uber.org/atomic/nocmp.go create mode 100644 vendor/go.uber.org/atomic/string_ext.go create mode 100644 vendor/go.uber.org/atomic/uint32.go create mode 100644 vendor/go.uber.org/atomic/uint64.go create mode 100644 vendor/go.uber.org/atomic/value.go delete mode 100644 vendor/go.uber.org/multierr/glide.lock create mode 100644 vendor/go.uber.org/multierr/go.mod create mode 100644 vendor/go.uber.org/multierr/go.sum create mode 100644 vendor/go.uber.org/multierr/go113.go rename vendor/go.uber.org/zap/{check_license.sh => checklicense.sh} (100%) mode change 100755 => 100644 delete mode 100644 vendor/go.uber.org/zap/glide.lock create mode 100644 vendor/go.uber.org/zap/go.mod create mode 100644 vendor/go.uber.org/zap/go.sum create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go create mode 100644 vendor/golang.org/x/image/AUTHORS create mode 100644 vendor/golang.org/x/image/CONTRIBUTORS create mode 100644 vendor/golang.org/x/image/LICENSE create mode 100644 vendor/golang.org/x/image/PATENTS create mode 100644 vendor/golang.org/x/image/bmp/reader.go create mode 100644 vendor/golang.org/x/image/bmp/writer.go create mode 100644 vendor/golang.org/x/image/ccitt/reader.go create mode 100644 vendor/golang.org/x/image/ccitt/table.go create mode 100644 vendor/golang.org/x/image/ccitt/writer.go create mode 100644 vendor/golang.org/x/image/tiff/buffer.go create mode 100644 vendor/golang.org/x/image/tiff/compress.go create mode 100644 vendor/golang.org/x/image/tiff/consts.go create mode 100644 vendor/golang.org/x/image/tiff/fuzz.go create mode 100644 vendor/golang.org/x/image/tiff/lzw/reader.go create mode 100644 vendor/golang.org/x/image/tiff/reader.go create mode 100644 vendor/golang.org/x/image/tiff/writer.go delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go create mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/fdset.go mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mkall.sh delete mode 
100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go rename vendor/golang.org/x/sys/unix/{zptracearm_linux.go => zptrace_armnn_linux.go} (93%) create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go rename vendor/golang.org/x/sys/unix/{zptracemips_linux.go => zptrace_mipsnn_linux.go} (93%) rename vendor/golang.org/x/sys/unix/{zptracemipsle_linux.go => zptrace_mipsnnle_linux.go} (93%) rename vendor/golang.org/x/sys/unix/{zptrace386_linux.go => zptrace_x86_linux.go} (95%) create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go rename vendor/golang.org/x/sys/unix/{zsyscall_darwin_386.1_11.go => zsyscall_linux.go} (54%) rename vendor/golang.org/x/sys/unix/{zsyscall_darwin_arm64.1_11.go => zsyscall_openbsd_mips64.go} (85%) create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go 
delete mode 100644 vendor/golang.org/x/text/collate/collate.go delete mode 100644 vendor/golang.org/x/text/collate/index.go delete mode 100644 vendor/golang.org/x/text/collate/maketables.go delete mode 100644 vendor/golang.org/x/text/collate/option.go delete mode 100644 vendor/golang.org/x/text/collate/sort.go delete mode 100644 vendor/golang.org/x/text/collate/tables.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/collelem.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/colltab.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/contract.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/iter.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/numeric.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/table.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/trie.go delete mode 100644 vendor/golang.org/x/text/internal/colltab/weighter.go delete mode 100644 vendor/golang.org/x/text/internal/gen/code.go delete mode 100644 vendor/golang.org/x/text/internal/gen/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go delete mode 100644 vendor/golang.org/x/text/internal/language/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go delete mode 100644 vendor/golang.org/x/text/internal/triegen/compact.go delete mode 100644 vendor/golang.org/x/text/internal/triegen/print.go delete mode 100644 vendor/golang.org/x/text/internal/triegen/triegen.go delete mode 100644 vendor/golang.org/x/text/internal/ucd/ucd.go delete mode 100644 vendor/golang.org/x/text/language/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/merge.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/rangetable.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables9.0.0.go create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 
vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go delete mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go mode change 100755 => 100644 vendor/google.golang.org/grpc/codegen.sh rename vendor/google.golang.org/grpc/credentials/{tls13.go => go12.go} (100%) create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go rename vendor/google.golang.org/grpc/{internal/transport/log.go => health/logging.go} (53%) delete mode 100755 vendor/google.golang.org/grpc/health/regenerate.sh mode change 100755 => 100644 vendor/google.golang.org/grpc/install_gae.sh delete mode 100755 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go rename vendor/google.golang.org/grpc/{credentials/internal => internal/credentials}/syscallconn.go (96%) rename vendor/google.golang.org/grpc/{credentials/internal => internal/credentials}/syscallconn_appengine.go (97%) create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/dns/dns_resolver.go (71%) create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/passthrough/passthrough.go (94%) create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go rename vendor/google.golang.org/grpc/{ => internal/transport}/proxy.go (73%) delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/reflection/README.md create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto create mode 100644 
vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go create mode 100644 vendor/google.golang.org/grpc/regenerate.sh mode change 100755 => 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/google.golang.org/protobuf/AUTHORS create mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 
vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go delete mode 100644 
vendor/gopkg.in/asn1-ber.v1/.travis.yml
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
create mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml
create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE
create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE
create mode 100644 vendor/gopkg.in/yaml.v3/README.md
create mode 100644 vendor/gopkg.in/yaml.v3/apic.go
create mode 100644 vendor/gopkg.in/yaml.v3/decode.go
create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go
create mode 100644 vendor/gopkg.in/yaml.v3/encode.go
create mode 100644 vendor/gopkg.in/yaml.v3/go.mod
create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go
create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go
create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go
create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go
create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go
create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go
create mode 100644 vendor/modules.txt
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index e3d804a..0000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,381 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:b6d886569181ec96ca83d529f4d6ba0cbf92ace7bb6f633f90c5f34d9bba7aab"
- name = "github.com/blang/semver"
- packages = ["."]
- pruneopts = "UT"
- revision = "ba2c2ddd89069b46a7011d4106f6868f17ee1705"
- version = "v3.6.1"
-
-[[projects]]
- digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = "UT"
- revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
- version = "v1.1.1"
-
-[[projects]]
- branch = "master"
- digest = "1:665c8850f673be8d358fe61ab00412049e685202aa7466ae5c72ad50d89c84f7"
- name = "github.com/dyatlov/go-opengraph"
- packages = ["opengraph"]
- pruneopts = "UT"
- revision = "816b6608b3c8c1e871bc9cf777f390e2532081fe"
-
-[[projects]]
- digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
- name = "github.com/fsnotify/fsnotify"
- packages = ["."]
- pruneopts = "UT"
- revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
- version = "v1.4.7"
-
-[[projects]]
- digest = "1:f5ce1529abc1204444ec73779f44f94e2fa8fcdb7aca3c355b0c95947e4005c6"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/timestamp",
- ]
- pruneopts = "UT"
- revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
- version = "v1.3.2"
-
-[[projects]]
- digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b"
- name = "github.com/google/uuid"
- packages = ["."]
- pruneopts = "UT"
- revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:e62657cca9badaa308d86e7716083e4c5933bb78e30a17743fc67f50be26f6f4"
- name = "github.com/gorilla/websocket"
- packages = ["."]
- pruneopts = "UT"
- revision = "c3e18be99d19e6b3e8f1559eea2c161a665c4b6b"
- version = "v1.4.1"
-
-[[projects]]
- digest = "1:cf6b61e1b4c26b0c7526cee4a0cee6d8302b17798af4b2a56a90eedac0aef11a"
- name = "github.com/hashicorp/go-hclog"
- packages = ["."]
- pruneopts = "UT"
- revision = "5ccdce08c75b6c7b37af61159f13f6a4f5e2e928"
- version = "v0.9.2"
-
-[[projects]]
- digest = "1:5d8ebc1cb15676990b1cb8b969b86de8549b21a3729bd2cf11754211e1919131"
- name =
"github.com/hashicorp/go-plugin" - packages = [ - ".", - "internal/plugin", - ] - pruneopts = "UT" - revision = "9e3e1c37db188a1acb66561ee0ed4bf4d5e77554" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:a4826c308e84f5f161b90b54a814f0be7d112b80164b9b884698a6903ea47ab3" - name = "github.com/hashicorp/yamux" - packages = ["."] - pruneopts = "UT" - revision = "2f1d1f20f75d5404f53b9edf6b53ed5505508675" - -[[projects]] - digest = "1:3e7f985557bc1a34b4a250d71801b7a1b67cce3ed1bf01f0db087efb969c29eb" - name = "github.com/mattermost/go-i18n" - packages = [ - "i18n", - "i18n/bundle", - "i18n/language", - "i18n/translation", - ] - pruneopts = "UT" - revision = "9a9b2eb57c9f39e7142fe48a35f047716c444ed1" - version = "v1.11.0" - -[[projects]] - digest = "1:b6e594e321771526b943a49fb932942485e851828079b992c81da00837814519" - name = "github.com/mattermost/ldap" - packages = ["."] - pruneopts = "UT" - revision = "fee0a369f21be68ea3d647c0da8272a352fa1528" - version = "v3.0.4" - -[[projects]] - digest = "1:12b0901e7242ff04d86ce22a52903b38eb3f12919503574d54d784b4cac56141" - name = "github.com/mattermost/mattermost-server" - packages = [ - "mlog", - "model", - "plugin", - "plugin/plugintest", - "plugin/plugintest/mock", - "services/timezones", - "utils", - "utils/fileutils", - "utils/jsonutils", - "utils/markdown", - ] - pruneopts = "UT" - revision = "f4f7fd0829d74fcd5e290fd1ba76845af18e553e" - version = "v5.15.0" - -[[projects]] - digest = "1:42eb1f52b84a06820cedc9baec2e710bfbda3ee6dac6cdb97f8b9a5066134ec6" - name = "github.com/mitchellh/go-testing-interface" - packages = ["."] - pruneopts = "UT" - revision = "6d0b8010fcc857872e42fc6c931227569016843c" - version = "v1.0.0" - -[[projects]] - digest = "1:9ec6cf1df5ad1d55cf41a43b6b1e7e118a91bade4f68ff4303379343e40c0e25" - name = "github.com/oklog/run" - packages = ["."] - pruneopts = "UT" - revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39" - version = "v1.0.0" - -[[projects]] - digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "UT" - revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" - version = "v1.2" - -[[projects]] - digest = "1:93131d8002d7025da13582877c32d1fc302486775a1b06f62241741006428c5e" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "UT" - revision = "728039f679cbcd4f6a54e080d2219a4c4928c546" - version = "v1.4.0" - -[[projects]] - digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "UT" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:86562a966508283e1ce3f6de0be5a7d813070271d6e14dc5af3cd5863fee03a4" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - ] - pruneopts = "UT" - revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" - version = "v1.4.0" - -[[projects]] - digest = 
"1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "df976f2515e274675050de7b3f42545de80594fd" - version = "v1.4.0" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "27376062155ad36be76b0f12cf1572a221d3a48c" - version = "v1.10.0" - -[[projects]] - branch = "master" - digest = "1:9d5b5d543996dd584da1db1e0de1926f3e4c3a8dba0fa2f8db70f3ebee2342e0" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - ] - pruneopts = "UT" - revision = "227b76d455e791cb042b03e633e2f7fbcfdf74a5" - -[[projects]] - branch = "master" - digest = "1:4196eeac75aa18c99a09e9a6206ba763a25c6f695531a5e0736b06d81ba3ef7b" - name = "golang.org/x/net" - packages = [ - "context", - "html", - "html/atom", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "1a5e07d1ff72da30f660d3c95b312a20be31173c" - -[[projects]] - branch = "master" - digest = "1:efaeaf7e03393d54963ece1e4c1866915d712a07adf4be8ea8105a67e3418e8f" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "0a153f010e6963173baba2306531d173aa843137" - -[[projects]] - digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - version = "v0.3.2" - -[[projects]] - branch = "master" - digest = "1:583a0c80f5e3a9343d33aea4aead1e1afcc0043db66fdf961ddd1fe8cd3a4faf" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "f660b865573183437d2d868f703fe88bb8af0b55" - -[[projects]] - digest = "1:cf0d541de126d13cf9187c90a8f84df915f603656b59e78778d043cb3dc1ba94" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "health", - "health/grpc_health_v1", - "internal", - "internal/backoff", - "internal/balancerload", - "internal/binarylog", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/grpcsync", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "serviceconfig", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "39e8a7b072a67ca2a75f57fa2e0d50995f5b22f6" - version = "v1.23.1" - -[[projects]] - digest = "1:af07c44dc04418be522bfd4e21ca9130d58169ea084e3a883e23772003a381c4" - name = "gopkg.in/asn1-ber.v1" - packages = ["."] - pruneopts = "UT" - revision 
= "f715ec2f112d1e4195b827ad68cf44017a3ef2b1"
- version = "v1.3"
-
-[[projects]]
- digest = "1:c805e517269b0ba4c21ded5836019ed7d16953d4026cb7d00041d039c7906be9"
- name = "gopkg.in/natefinch/lumberjack.v2"
- packages = ["."]
- pruneopts = "UT"
- revision = "a96e63847dc3c67d17befa69c303767e2f84e54f"
- version = "v2.1"
-
-[[projects]]
- digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = "UT"
- revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
- version = "v2.2.2"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "github.com/mattermost/mattermost-server/model",
- "github.com/mattermost/mattermost-server/plugin",
- "github.com/mattermost/mattermost-server/plugin/plugintest",
- "github.com/mattermost/mattermost-server/plugin/plugintest/mock",
- "github.com/pkg/errors",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100644
index 5c879c7..0000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-[prune]
- go-tests = true
- unused-packages = true
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..67916f0
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,9 @@
+module matterbar
+
+go 1.16
+
+require (
+ github.com/mattermost/mattermost-server/v5 v5.32.1
+ github.com/pkg/errors v0.9.1
+ github.com/stretchr/testify v1.7.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..7388394
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,1133 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+cloud.google.com/go v0.37.1/go.mod h1:SAbnLi6YTSPKSI0dTUEOVLCkyPfKXK8n4ibqiMoj4ok=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+code.sajari.com/docconv v1.1.1-0.20200701232649-d9ea05fbd50a/go.mod h1:DooS873W9YwUjTwEYGpg55aDlvnx1VcEdr7IJ9rEW8g=
+contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod
h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-sdk-for-go v26.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v11.5.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/JalfResi/justext v0.0.0-20170829062021-c0282dea7198/go.mod h1:0SURuH1rsE8aVWvutuMZghRNrNrYEUzibzJfhEYR8L0= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PaulARoy/azurestoragecache v0.0.0-20170906084534-3c249a3ba788/go.mod h1:lY1dZd8HBzJ10eqKERHn3CU59tfhzcAVb2c0ZhIWSOk= +github.com/PuerkitoBio/goquery v1.4.1/go.mod h1:T9ezsOHcCrDCgA8aF1Cqr3sSYbO/xgdy8/R/XiIMAhA= +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/PuerkitoBio/goquery v1.6.0/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/RoaringBitmap/roaring v0.5.5/go.mod h1:puNo5VdzwbaIQxSiDIwfXl4Hnc+fbovcX4IW/dSTtUk= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/advancedlogic/GoOse v0.0.0-20191112112754-e742535969c1/go.mod 
h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w= +github.com/advancedlogic/GoOse v0.0.0-20200830213114-1225d531e0ad/go.mod h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/araddon/dateparse v0.0.0-20180729174819-cfd92a431d0e/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI= +github.com/araddon/dateparse v0.0.0-20200409225146-d820a6159ab1/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI= +github.com/araddon/dateparse v0.0.0-20201001162425-8aadafed4dc4/go.mod h1:hMAUZFIkk4B1FouGxqlogyMyU6BwY/UiVmmbbzz9Up8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.5/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1/go.mod h1:noBAuukeYOXa0aXGqxr24tADqkwDO2KRD15FsuaZ5a8= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.36.15/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/beevik/etree v1.1.0/go.mod 
h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blevesearch/bleve v1.0.14/go.mod h1:e/LJTr+E7EaoVdkQZTfoz7dt4KoDNvDbLb8MSKuNTLQ= +github.com/blevesearch/blevex v1.0.0/go.mod h1:2rNVqoG2BZI8t1/P1awgTKnGlx5MP9ZbtEciQaNhswc= +github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= +github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= +github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= +github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= +github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= +github.com/blevesearch/zap/v11 v11.0.14/go.mod h1:MUEZh6VHGXv1PKx3WnCbdP404LGG2IZVa/L66pyFwnY= +github.com/blevesearch/zap/v12 v12.0.14/go.mod h1:rOnuZOiMKPQj18AEKEHJxuI14236tTQ1ZJz4PAnWlUg= +github.com/blevesearch/zap/v13 v13.0.6/go.mod h1:L89gsjdRKGyGrRN6nCpIScCvvkyxvmeDCwZRcjjPCrw= +github.com/blevesearch/zap/v14 v14.0.5/go.mod h1:bWe8S7tRrSBTIaZ6cLRbgNH4TUDaC9LZSpRGs85AsGY= +github.com/blevesearch/zap/v15 v15.0.3/go.mod h1:iuwQrImsh1WjWJ0Ue2kBqY83a0rFtJTqfa9fp1rbVVU= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven 
v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= +github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= +github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= +github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/dgoogauth v0.0.0-20190221195224-5a805980a5f3/go.mod h1:hEfFauPHz7+NnjR/yHJGhrKo1Za+zStgwUETx3yzqgY= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod 
h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/die-net/lrucache v0.0.0-20181227122439-19a39ef22a11/go.mod h1:ew0MSjCVDdtGMjF3kzLK9hwdgF5mOE8SbYVF3Rc7mkU= +github.com/disintegration/imaging v1.6.0/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8 h1:6muCmMJat6z7qptVrIf/+OWPxsjAfvhw5/6t+FwEkgg= +github.com/dyatlov/go-opengraph v0.0.0-20180429202543-816b6608b3c8/go.mod h1:nYia/MIs9OyvXXYboPmNOj0gVWo97Wx0sde+ZuKkoM4= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getsentry/sentry-go v0.9.0/go.mod h1:kELm/9iCblqUYh+ZRML7PNdCvEuw24wBvJPYyi86cws= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gigawattio/window v0.0.0-20180317192513-0f5467e35573/go.mod h1:eBvb3i++NHDH4Ugo9qCvMw8t0mTSctaEa5blJbWcNxs= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.3 h1:u7utq56RUFiynqUzgVMFDymapcOtQ/MZkh3H4QYkxag= +github.com/go-asn1-ber/asn1-ber v1.5.3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo= 
+github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY= +github.com/go-resty/resty/v2 v2.0.0/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= +github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy 
v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/schema v1.2.0/go.mod 
h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8RYb1Y7fYivughjxojTmIu5iAIjSrSLCLeqE= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 
h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo= +github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/ikawaha/kagome.ipadic v1.1.2/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod 
h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/jamiealquiza/envy v1.1.0/go.mod h1:MP36BriGCLwEHhi1OU8E9569JNZrjWfCvzG7RsPnHus= +github.com/jaytaylor/html2text v0.0.0-20180606194806-57d518f124b0/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= +github.com/jaytaylor/html2text v0.0.0-20200412013138-3577fbdbcff7/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/ledongthuc/pdf v0.0.0-20200323191019-23c5852adbd2/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/levigross/exp-html v0.0.0-20120902181939-8df60c69a8f5/go.mod h1:QMe2wuKJ0o7zIVE8AqiT8rd8epmm6WDIZ2wyuBqYPzM= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattermost/go-i18n v1.11.0 h1:1hLKqn/ZvhZ80OekjVPGYcCrBfMz+YxNNgqS+beL7zE= +github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34= +github.com/mattermost/gorp v1.6.2-0.20200624165429-2595d5e54111/go.mod h1:QCQ3U0M9T/BlAdjKFJo0I1oe/YAgbyjNdhU8bpOLafk= +github.com/mattermost/gosaml2 v0.3.3/go.mod h1:Z429EIOiEi9kbq6yHoApfzlcXpa6dzRDc6pO+Vy2Ksk= +github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d h1:/RJ/UV7M5c7L2TQ0KNm4yZxxFvC1nvRz/gY/Daa35aI= +github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d/go.mod h1:HLbgMEI5K131jpxGazJ97AxfPDt31osq36YS1oxFQPQ= +github.com/mattermost/logr v1.0.13 h1:6F/fM3csvH6Oy5sUpJuW7YyZSzZZAhJm5VcgKMxA2P8= +github.com/mattermost/logr v1.0.13/go.mod h1:Mt4DPu1NXMe6JxPdwCC0XBoxXmN9eXOIRPoZarU2PXs= +github.com/mattermost/mattermost-server/v5 v5.32.1 h1:h9J8qLdUVXLFdMAacD39KaLeDVoiEkArzkGZE/BkXyg= +github.com/mattermost/mattermost-server/v5 v5.32.1/go.mod h1:tJ3IVS/5N1HwtmgPV2DIWPCjTJR40OCDGbur8vlRfkY= +github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/mholt/archiver/v3 v3.5.0/go.mod 
h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/md5-simd v1.1.1/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/minio-go/v7 v7.0.6/go.mod h1:HcIuq+11d/3MfavIPZiswSzfQ1VJ2Lwxp/XLtW46IWQ= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= +github.com/muesli/smartcrop v0.2.1-0.20181030220600-548bbf0c0965/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI= +github.com/muesli/smartcrop v0.3.0/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod 
h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= +github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= +github.com/oov/psd v0.0.0-20201203182240-dad9002861d9/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go 
v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/gosseract/v2 v2.2.4/go.mod h1:ahOp/kHojnOMGv1RaUnR0jwY5JVa6BYKhYAS8nbMLSo= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v0.0.0-20171120014656-2973218375c3/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rudderlabs/analytics-go v3.2.1+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30= +github.com/russellhaering/goxmldsig v1.1.0/go.mod h1:QK8GhXPB3+AfuCrfo0oRISa9NfzeCpWmxeGnqEpDF9o= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v0.0.0-20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod 
h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/simplereach/timeutils v1.2.0/go.mod h1:VVbQDfN/FHRZa1LSqcwo4kNZ62OOyqLLGQKYB3pB0Q8= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1/go.mod 
h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/splitio/go-client/v6 v6.0.1/go.mod h1:WHjkBtEEJFHOpgqnHSb/X+e4CgzTi1gPNkg+c7w1Izg= +github.com/splitio/go-split-commons/v2 v2.0.1/go.mod h1:YsmZa0AE9DCTosPXekKBxYHzNXJHpZwID9FSfo5Tnis= +github.com/splitio/go-toolkit/v3 v3.0.1/go.mod h1:HGgawLnM2RlM84zVRbATpPMjF7H6u9CUYG6RlpwOlOk= +github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= +github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tebeka/snowball v0.4.2/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/throttled/throttled v2.2.5+incompatible/go.mod h1:0BjlrEGQmvxps+HuXLsyRdqpSRvJpq0PNIsOtqP9Nos= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= +github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ttacon/chalk 
v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.1.1/go.mod h1:C5gboKD0TJPqWDTVTtrQNfRbiBwHZGo8UTqP/9/XvLI= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/wiggin77/cfg v1.0.2 h1:NBUX+iJRr+RTncTqTNvajHwzduqbhCQjEqxLHr6Fk7A= +github.com/wiggin77/cfg v1.0.2/go.mod h1:b3gotba2e5bXTqTW48DwIFoLc+4lWKP7WPi/CdvZ4aE= +github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= +github.com/wiggin77/merror v1.0.3 h1:8+ZHV+aSnJoYghE3EUThl15C6rvF2TYRSvOSBjdmNR8= +github.com/wiggin77/merror v1.0.3/go.mod h1:H2ETSu7/bPE0Ymf4bEwdUoo73OOEkdClnoRisfw0Nm0= +github.com/wiggin77/srslog v1.0.1 h1:gA2XjSMy3DrRdX9UqLuDtuVAAshb8bE1NhX1YK0Qe+8= +github.com/wiggin77/srslog v1.0.1/go.mod h1:fehkyYDq1QfuYn60TDPu9YdY2bB85VUW2mvN1WynEls= +github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= +go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/build 
v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6 h1:nfeHNc1nAqecKCy2FCy4HY+soOOe5sDLJ/gZLbx6GYI= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201223074533-0d417f636930 h1:vRgIt+nup/B/BwIS0g2oC0haq0iqbV3ZA+u6+0TlNCo= +golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200928182047-19e03678916f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190321212433-e79c0c59cdb5/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/olivere/elastic.v6 v6.2.35/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +willnorris.com/go/gifresize v1.0.0/go.mod h1:eBM8gogBGCcaH603vxSpnfjwXIpq6nmnj/jauBDKtAk= +willnorris.com/go/imageproxy v0.10.0/go.mod h1:2tWdKRneln3E9X/zwH1RINpQAQWPeUiNynZ7UQ9OROk= diff --git a/server/command.go b/server/command.go index 77a183e..ddc3d2b 100644 --- a/server/command.go +++ b/server/command.go @@ -6,8 +6,8 @@ import ( "sort" "strings" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin" ) const ( diff --git a/server/command_test.go b/server/command_test.go index b71457e..39adfbf 100644 --- a/server/command_test.go +++ b/server/command_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin/plugintest" - "github.com/mattermost/mattermost-server/plugin/plugintest/mock" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock" ) func 
TestGetUsernameList(t *testing.T) { diff --git a/server/configuration.go b/server/configuration.go index bc55f12..f4cf7ba 100644 --- a/server/configuration.go +++ b/server/configuration.go @@ -2,7 +2,7 @@ package main import ( "fmt" - "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/v5/model" "github.com/pkg/errors" ) diff --git a/server/configuration_test.go b/server/configuration_test.go index eb85c6a..5ea462b 100644 --- a/server/configuration_test.go +++ b/server/configuration_test.go @@ -3,9 +3,9 @@ package main import ( "testing" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin/plugintest" - "github.com/mattermost/mattermost-server/plugin/plugintest/mock" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock" ) func TestCloningConfiguration(t *testing.T) { diff --git a/server/http.go b/server/http.go index 37e5903..d7403e4 100644 --- a/server/http.go +++ b/server/http.go @@ -6,8 +6,8 @@ import ( "fmt" "net/http" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin" ) const ( diff --git a/server/http_test.go b/server/http_test.go index cb1eec3..4afba01 100644 --- a/server/http_test.go +++ b/server/http_test.go @@ -9,9 +9,9 @@ import ( "path/filepath" "testing" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin/plugintest" - "github.com/mattermost/mattermost-server/plugin/plugintest/mock" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock" ) func loadJsonFile(t *testing.T, name string) []byte { diff --git a/server/main.go b/server/main.go index a9b838d..1945ea0 100644 --- a/server/main.go +++ b/server/main.go @@ -1,7 +1,7 @@ package main import ( - "github.com/mattermost/mattermost-server/plugin" + "github.com/mattermost/mattermost-server/v5/plugin" ) func main() { diff --git a/server/plugin.go b/server/plugin.go index d4b9d9a..26d01ae 100644 --- a/server/plugin.go +++ b/server/plugin.go @@ -5,8 +5,8 @@ import ( "path/filepath" "sync" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin" "github.com/pkg/errors" ) diff --git a/server/plugin_test.go b/server/plugin_test.go index 2cdf1c2..abb771b 100644 --- a/server/plugin_test.go +++ b/server/plugin_test.go @@ -4,9 +4,9 @@ import ( "path/filepath" "testing" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/plugin/plugintest" - "github.com/mattermost/mattermost-server/plugin/plugintest/mock" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest" + "github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock" ) func TestOnActivate(t *testing.T) { diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml index 7319bde..102fb9a 100644 --- a/vendor/github.com/blang/semver/.travis.yml +++ b/vendor/github.com/blang/semver/.travis.yml @@ -1,14 +1,10 @@ language: go matrix: include: - - go: 
1.4.x - - go: 1.5.x - - go: 1.6.x - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 - go: tip allow_failures: - go: tip @@ -17,7 +13,7 @@ install: - go get github.com/mattn/goveralls script: - echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci - -repotoken=$COVERALLS_TOKEN + -repotoken $COVERALLS_TOKEN - echo "Build examples" ; cd examples && go build - echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) env: diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md index e05f986..08b2e4a 100644 --- a/vendor/github.com/blang/semver/README.md +++ b/vendor/github.com/blang/semver/README.md @@ -1,4 +1,4 @@ -semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.svg)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/blang/semver)](https://goreportcard.com/report/github.com/blang/semver) +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) ====== semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. @@ -83,8 +83,8 @@ Range usage: ``` v, err := semver.Parse("1.2.3") -expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if expectedRange(v) { +range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") +if range(v) { //valid } diff --git a/vendor/github.com/blang/semver/go.mod b/vendor/github.com/blang/semver/go.mod deleted file mode 100644 index d008305..0000000 --- a/vendor/github.com/blang/semver/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/blang/semver diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go index 95f7139..fca406d 100644 --- a/vendor/github.com/blang/semver/range.go +++ b/vendor/github.com/blang/semver/range.go @@ -327,7 +327,7 @@ func expandWildcardVersion(parts [][]string) ([][]string, error) { for _, p := range parts { var newParts []string for _, ap := range p { - if strings.Contains(ap, "x") { + if strings.Index(ap, "x") != -1 { opStr, vStr, err := splitComparatorVersion(ap) if err != nil { return nil, err diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go index 4165bc7..8ee0842 100644 --- a/vendor/github.com/blang/semver/semver.go +++ b/vendor/github.com/blang/semver/semver.go @@ -26,7 +26,7 @@ type Version struct { Minor uint64 Patch uint64 Pre []PRVersion - Build []string //No Precedence + Build []string //No Precendence } // Version to string @@ -161,36 +161,6 @@ func (v Version) Compare(o Version) int { } -// IncrementPatch increments the patch version -func (v *Version) IncrementPatch() error { - if v.Major == 0 { - return fmt.Errorf("Patch version can not be incremented for %q", v.String()) - } - v.Patch += 1 - return nil -} - -// IncrementMinor increments the minor version -func (v *Version) IncrementMinor() error { - if v.Major 
== 0 { - return fmt.Errorf("Minor version can not be incremented for %q", v.String()) - } - v.Minor += 1 - v.Patch = 0 - return nil -} - -// IncrementMajor increments the major version -func (v *Version) IncrementMajor() error { - if v.Major == 0 { - return fmt.Errorf("Major version can not be incremented for %q", v.String()) - } - v.Major += 1 - v.Minor = 0 - v.Patch = 0 - return nil -} - // Validate validates v and returns error in case func (v Version) Validate() error { // Major, Minor, Patch already validated using uint64 @@ -232,21 +202,14 @@ func Make(s string) (Version, error) { // ParseTolerant allows for certain version specifications that do not strictly adhere to semver // specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions -// with only major and minor components specified, and removes leading 0s. +// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions +// with only major and minor components specified func ParseTolerant(s string) (Version, error) { s = strings.TrimSpace(s) s = strings.TrimPrefix(s, "v") // Split into major.minor.(patch+pr+meta) parts := strings.SplitN(s, ".", 3) - // Remove leading zeros. - for i, p := range parts { - if len(p) > 1 { - parts[i] = strings.TrimPrefix(p, "0") - } - } - // Fill up shortened versions. if len(parts) < 3 { if strings.ContainsAny(parts[len(parts)-1], "+-") { return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") @@ -254,8 +217,8 @@ func ParseTolerant(s string) (Version, error) { for len(parts) < 3 { parts = append(parts, "0") } + s = strings.Join(parts, ".") } - s = strings.Join(parts, ".") return Parse(s) } diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go index db95813..eb4d802 100644 --- a/vendor/github.com/blang/semver/sql.go +++ b/vendor/github.com/blang/semver/sql.go @@ -14,7 +14,7 @@ func (v *Version) Scan(src interface{}) (err error) { case []byte: str = string(src) default: - return fmt.Errorf("version.Scan: cannot convert %T to string", src) + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) } if t, err := Parse(str); err == nil { diff --git a/vendor/github.com/disintegration/imaging/.travis.yml b/vendor/github.com/disintegration/imaging/.travis.yml new file mode 100644 index 0000000..7ae5e4b --- /dev/null +++ b/vendor/github.com/disintegration/imaging/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - "1.10.x" + - "1.11.x" + - "1.12.x" + +before_install: + - go get github.com/mattn/goveralls + +script: + - go test -v -race -cover + - $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/disintegration/imaging/LICENSE b/vendor/github.com/disintegration/imaging/LICENSE new file mode 100644 index 0000000..a4144a9 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2012 Grigory Dryapak + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this 
permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/disintegration/imaging/README.md b/vendor/github.com/disintegration/imaging/README.md new file mode 100644 index 0000000..a1fd764 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/README.md @@ -0,0 +1,226 @@ +# Imaging + +[![GoDoc](https://godoc.org/github.com/disintegration/imaging?status.svg)](https://godoc.org/github.com/disintegration/imaging) +[![Build Status](https://travis-ci.org/disintegration/imaging.svg?branch=master)](https://travis-ci.org/disintegration/imaging) +[![Coverage Status](https://coveralls.io/repos/github/disintegration/imaging/badge.svg?branch=master&service=github)](https://coveralls.io/github/disintegration/imaging?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/disintegration/imaging)](https://goreportcard.com/report/github.com/disintegration/imaging) + +Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.). + +All the image processing functions provided by the package accept any image type that implements `image.Image` interface +as an input, and return a new image of `*image.NRGBA` type (32bit RGBA colors, non-premultiplied alpha). + +## Installation + + go get -u github.com/disintegration/imaging + +## Documentation + +http://godoc.org/github.com/disintegration/imaging + +## Usage examples + +A few usage examples can be found below. See the documentation for the full list of supported functions. + +### Image resizing + +```go +// Resize srcImage to size = 128x128px using the Lanczos filter. +dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos) + +// Resize srcImage to width = 800px preserving the aspect ratio. +dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos) + +// Scale down srcImage to fit the 800x600px bounding box. +dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos) + +// Resize and crop the srcImage to fill the 100x100px area. +dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos) +``` + +Imaging supports image resizing using various resampling filters. The most notable ones: +- `Lanczos` - A high-quality resampling filter for photographic images yielding sharp results. +- `CatmullRom` - A sharp cubic filter that is faster than Lanczos filter while providing similar results. +- `MitchellNetravali` - A cubic filter that produces smoother results with less ringing artifacts than CatmullRom. +- `Linear` - Bilinear resampling filter, produces smooth output. Faster than cubic filters. +- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor. +- `NearestNeighbor` - Fastest resampling filter, no antialiasing. + +The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. 
Custom filters can be created using the ResampleFilter struct. + +**Resampling filters comparison** + +Original image: + +![srcImage](testdata/branches.png) + +The same image resized from 600x400px to 150x100px using different resampling filters. +From faster (lower quality) to slower (higher quality): + +Filter | Resize result +--------------------------|--------------------------------------------- +`imaging.NearestNeighbor` | ![dstImage](testdata/out_resize_nearest.png) +`imaging.Linear` | ![dstImage](testdata/out_resize_linear.png) +`imaging.CatmullRom` | ![dstImage](testdata/out_resize_catrom.png) +`imaging.Lanczos` | ![dstImage](testdata/out_resize_lanczos.png) + + +### Gaussian Blur + +```go +dstImage := imaging.Blur(srcImage, 0.5) +``` + +The sigma parameter controls the strength of the blurring effect. + +Original image | Sigma = 0.5 | Sigma = 1.5 +-----------------------------------|----------------------------------------|--------------------------------------- +![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_blur_0.5.png) | ![dstImage](testdata/out_blur_1.5.png) + +### Sharpening + +```go +dstImage := imaging.Sharpen(srcImage, 0.5) +``` + +`Sharpen` uses a Gaussian function internally. The sigma parameter controls the strength of the sharpening effect. + +Original image | Sigma = 0.5 | Sigma = 1.5 +-----------------------------------|-------------------------------------------|------------------------------------------ +![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_sharpen_0.5.png) | ![dstImage](testdata/out_sharpen_1.5.png) + +### Gamma correction + +```go +dstImage := imaging.AdjustGamma(srcImage, 0.75) +``` + +Original image | Gamma = 0.75 | Gamma = 1.25 +-----------------------------------|------------------------------------------|----------------------------------------- +![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_gamma_0.75.png) | ![dstImage](testdata/out_gamma_1.25.png) + +### Contrast adjustment + +```go +dstImage := imaging.AdjustContrast(srcImage, 20) +``` + +Original image | Contrast = 15 | Contrast = -15 +-----------------------------------|--------------------------------------------|------------------------------------------- +![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_contrast_p15.png) | ![dstImage](testdata/out_contrast_m15.png) + +### Brightness adjustment + +```go +dstImage := imaging.AdjustBrightness(srcImage, 20) +``` + +Original image | Brightness = 10 | Brightness = -10 +-----------------------------------|----------------------------------------------|--------------------------------------------- +![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_brightness_p10.png) | ![dstImage](testdata/out_brightness_m10.png) + +### Saturation adjustment + +```go +dstImage := imaging.AdjustSaturation(srcImage, 20) +``` + +Original image | Saturation = 30 | Saturation = -30 +-----------------------------------|----------------------------------------------|--------------------------------------------- +![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_saturation_p30.png) | ![dstImage](testdata/out_saturation_m30.png) + +## FAQ + +### Incorrect image orientation after processing (e.g. an image appears rotated after resizing) + +Most probably, the given image contains the EXIF orientation tag. +The standard `image/*` packages do not support loading and saving +this kind of information. 
To fix the issue, try opening images with +the `AutoOrientation` decode option. If this option is set to `true`, +the image orientation is changed after decoding, according to the +orientation tag (if present). Here's the example: + +```go +img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true)) +``` + +### What's the difference between `imaging` and `gift` packages? + +[imaging](https://github.com/disintegration/imaging) +is designed to be a lightweight and simple image manipulation package. +It provides basic image processing functions and a few helper functions +such as `Open` and `Save`. It consistently returns *image.NRGBA image +type (8 bits per channel, RGBA). + +[gift](https://github.com/disintegration/gift) +supports more advanced image processing, for example, sRGB/Linear color +space conversions. It also supports different output image types +(e.g. 16 bits per channel) and provides easy-to-use API for chaining +multiple processing steps together. + +## Example code + +```go +package main + +import ( + "image" + "image/color" + "log" + + "github.com/disintegration/imaging" +) + +func main() { + // Open a test image. + src, err := imaging.Open("testdata/flowers.png") + if err != nil { + log.Fatalf("failed to open image: %v", err) + } + + // Crop the original image to 300x300px size using the center anchor. + src = imaging.CropAnchor(src, 300, 300, imaging.Center) + + // Resize the cropped image to width = 200px preserving the aspect ratio. + src = imaging.Resize(src, 200, 0, imaging.Lanczos) + + // Create a blurred version of the image. + img1 := imaging.Blur(src, 5) + + // Create a grayscale version of the image with higher contrast and sharpness. + img2 := imaging.Grayscale(src) + img2 = imaging.AdjustContrast(img2, 20) + img2 = imaging.Sharpen(img2, 2) + + // Create an inverted version of the image. + img3 := imaging.Invert(src) + + // Create an embossed version of the image using a convolution filter. + img4 := imaging.Convolve3x3( + src, + [9]float64{ + -1, -1, 0, + -1, 1, 1, + 0, 1, 1, + }, + nil, + ) + + // Create a new image and paste the four produced images into it. + dst := imaging.New(400, 400, color.NRGBA{0, 0, 0, 0}) + dst = imaging.Paste(dst, img1, image.Pt(0, 0)) + dst = imaging.Paste(dst, img2, image.Pt(0, 200)) + dst = imaging.Paste(dst, img3, image.Pt(200, 0)) + dst = imaging.Paste(dst, img4, image.Pt(200, 200)) + + // Save the resulting image as JPEG. + err = imaging.Save(dst, "testdata/out_example.jpg") + if err != nil { + log.Fatalf("failed to save image: %v", err) + } +} +``` + +Output: + +![dstImage](testdata/out_example.jpg) diff --git a/vendor/github.com/disintegration/imaging/adjust.go b/vendor/github.com/disintegration/imaging/adjust.go new file mode 100644 index 0000000..daaf1de --- /dev/null +++ b/vendor/github.com/disintegration/imaging/adjust.go @@ -0,0 +1,253 @@ +package imaging + +import ( + "image" + "image/color" + "math" +) + +// Grayscale produces a grayscale version of the image. 
+func Grayscale(img image.Image) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + parallel(0, src.h, func(ys <-chan int) { + for y := range ys { + i := y * dst.Stride + src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4]) + for x := 0; x < src.w; x++ { + d := dst.Pix[i : i+3 : i+3] + r := d[0] + g := d[1] + b := d[2] + f := 0.299*float64(r) + 0.587*float64(g) + 0.114*float64(b) + y := uint8(f + 0.5) + d[0] = y + d[1] = y + d[2] = y + i += 4 + } + } + }) + return dst +} + +// Invert produces an inverted (negated) version of the image. +func Invert(img image.Image) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + parallel(0, src.h, func(ys <-chan int) { + for y := range ys { + i := y * dst.Stride + src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4]) + for x := 0; x < src.w; x++ { + d := dst.Pix[i : i+3 : i+3] + d[0] = 255 - d[0] + d[1] = 255 - d[1] + d[2] = 255 - d[2] + i += 4 + } + } + }) + return dst +} + +// AdjustSaturation changes the saturation of the image using the percentage parameter and returns the adjusted image. +// The percentage must be in the range (-100, 100). +// The percentage = 0 gives the original image. +// The percentage = 100 gives the image with the saturation value doubled for each pixel. +// The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale). +// +// Examples: +// dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%. +// dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%. +// +func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA { + percentage = math.Min(math.Max(percentage, -100), 100) + multiplier := 1 + percentage/100 + + return AdjustFunc(img, func(c color.NRGBA) color.NRGBA { + h, s, l := rgbToHSL(c.R, c.G, c.B) + s *= multiplier + if s > 1 { + s = 1 + } + r, g, b := hslToRGB(h, s, l) + return color.NRGBA{r, g, b, c.A} + }) +} + +// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image. +// The percentage must be in range (-100, 100). The percentage = 0 gives the original image. +// The percentage = -100 gives solid gray image. +// +// Examples: +// +// dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%. +// dstImage = imaging.AdjustContrast(srcImage, 20) // Increase image contrast by 20%. +// +func AdjustContrast(img image.Image, percentage float64) *image.NRGBA { + percentage = math.Min(math.Max(percentage, -100.0), 100.0) + lut := make([]uint8, 256) + + v := (100.0 + percentage) / 100.0 + for i := 0; i < 256; i++ { + switch { + case 0 <= v && v <= 1: + lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0) + case 1 < v && v < 2: + lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0) + default: + lut[i] = uint8(float64(i)/255.0+0.5) * 255 + } + } + + return adjustLUT(img, lut) +} + +// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image. +// The percentage must be in range (-100, 100). The percentage = 0 gives the original image. +// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image. +// +// Examples: +// +// dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%. +// dstImage = imaging.AdjustBrightness(srcImage, 10) // Increase image brightness by 10%. 
+// +func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA { + percentage = math.Min(math.Max(percentage, -100.0), 100.0) + lut := make([]uint8, 256) + + shift := 255.0 * percentage / 100.0 + for i := 0; i < 256; i++ { + lut[i] = clamp(float64(i) + shift) + } + + return adjustLUT(img, lut) +} + +// AdjustGamma performs a gamma correction on the image and returns the adjusted image. +// Gamma parameter must be positive. Gamma = 1.0 gives the original image. +// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it. +// +// Example: +// +// dstImage = imaging.AdjustGamma(srcImage, 0.7) +// +func AdjustGamma(img image.Image, gamma float64) *image.NRGBA { + e := 1.0 / math.Max(gamma, 0.0001) + lut := make([]uint8, 256) + + for i := 0; i < 256; i++ { + lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0) + } + + return adjustLUT(img, lut) +} + +// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image. +// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail. +// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5. +// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10). +// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased. +// +// Examples: +// +// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // Increase the contrast. +// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast. +// +func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA { + if factor == 0 { + return Clone(img) + } + + lut := make([]uint8, 256) + a := math.Min(math.Max(midpoint, 0.0), 1.0) + b := math.Abs(factor) + sig0 := sigmoid(a, b, 0) + sig1 := sigmoid(a, b, 1) + e := 1.0e-6 + + if factor > 0 { + for i := 0; i < 256; i++ { + x := float64(i) / 255.0 + sigX := sigmoid(a, b, x) + f := (sigX - sig0) / (sig1 - sig0) + lut[i] = clamp(f * 255.0) + } + } else { + for i := 0; i < 256; i++ { + x := float64(i) / 255.0 + arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e) + f := a - math.Log(1.0/arg-1.0)/b + lut[i] = clamp(f * 255.0) + } + } + + return adjustLUT(img, lut) +} + +func sigmoid(a, b, x float64) float64 { + return 1 / (1 + math.Exp(b*(a-x))) +} + +// adjustLUT applies the given lookup table to the colors of the image. +func adjustLUT(img image.Image, lut []uint8) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + lut = lut[0:256] + parallel(0, src.h, func(ys <-chan int) { + for y := range ys { + i := y * dst.Stride + src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4]) + for x := 0; x < src.w; x++ { + d := dst.Pix[i : i+3 : i+3] + d[0] = lut[d[0]] + d[1] = lut[d[1]] + d[2] = lut[d[2]] + i += 4 + } + } + }) + return dst +} + +// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image. +// +// Example: +// +// dstImage = imaging.AdjustFunc( +// srcImage, +// func(c color.NRGBA) color.NRGBA { +// // Shift the red channel by 16. 
+// r := int(c.R) + 16 +// if r > 255 { +// r = 255 +// } +// return color.NRGBA{uint8(r), c.G, c.B, c.A} +// } +// ) +// +func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + parallel(0, src.h, func(ys <-chan int) { + for y := range ys { + i := y * dst.Stride + src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4]) + for x := 0; x < src.w; x++ { + d := dst.Pix[i : i+4 : i+4] + r := d[0] + g := d[1] + b := d[2] + a := d[3] + c := fn(color.NRGBA{r, g, b, a}) + d[0] = c.R + d[1] = c.G + d[2] = c.B + d[3] = c.A + i += 4 + } + } + }) + return dst +} diff --git a/vendor/github.com/disintegration/imaging/convolution.go b/vendor/github.com/disintegration/imaging/convolution.go new file mode 100644 index 0000000..11eddc1 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/convolution.go @@ -0,0 +1,148 @@ +package imaging + +import ( + "image" +) + +// ConvolveOptions are convolution parameters. +type ConvolveOptions struct { + // If Normalize is true the kernel is normalized before convolution. + Normalize bool + + // If Abs is true the absolute value of each color channel is taken after convolution. + Abs bool + + // Bias is added to each color channel value after convolution. + Bias int +} + +// Convolve3x3 convolves the image with the specified 3x3 convolution kernel. +// Default parameters are used if a nil *ConvolveOptions is passed. +func Convolve3x3(img image.Image, kernel [9]float64, options *ConvolveOptions) *image.NRGBA { + return convolve(img, kernel[:], options) +} + +// Convolve5x5 convolves the image with the specified 5x5 convolution kernel. +// Default parameters are used if a nil *ConvolveOptions is passed. +func Convolve5x5(img image.Image, kernel [25]float64, options *ConvolveOptions) *image.NRGBA { + return convolve(img, kernel[:], options) +} + +func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *image.NRGBA { + src := toNRGBA(img) + w := src.Bounds().Max.X + h := src.Bounds().Max.Y + dst := image.NewNRGBA(image.Rect(0, 0, w, h)) + + if w < 1 || h < 1 { + return dst + } + + if options == nil { + options = &ConvolveOptions{} + } + + if options.Normalize { + normalizeKernel(kernel) + } + + type coef struct { + x, y int + k float64 + } + var coefs []coef + var m int + + switch len(kernel) { + case 9: + m = 1 + case 25: + m = 2 + } + + i := 0 + for y := -m; y <= m; y++ { + for x := -m; x <= m; x++ { + if kernel[i] != 0 { + coefs = append(coefs, coef{x: x, y: y, k: kernel[i]}) + } + i++ + } + } + + parallel(0, h, func(ys <-chan int) { + for y := range ys { + for x := 0; x < w; x++ { + var r, g, b float64 + for _, c := range coefs { + ix := x + c.x + if ix < 0 { + ix = 0 + } else if ix >= w { + ix = w - 1 + } + + iy := y + c.y + if iy < 0 { + iy = 0 + } else if iy >= h { + iy = h - 1 + } + + off := iy*src.Stride + ix*4 + s := src.Pix[off : off+3 : off+3] + r += float64(s[0]) * c.k + g += float64(s[1]) * c.k + b += float64(s[2]) * c.k + } + + if options.Abs { + if r < 0 { + r = -r + } + if g < 0 { + g = -g + } + if b < 0 { + b = -b + } + } + + if options.Bias != 0 { + r += float64(options.Bias) + g += float64(options.Bias) + b += float64(options.Bias) + } + + srcOff := y*src.Stride + x*4 + dstOff := y*dst.Stride + x*4 + d := dst.Pix[dstOff : dstOff+4 : dstOff+4] + d[0] = clamp(r) + d[1] = clamp(g) + d[2] = clamp(b) + d[3] = src.Pix[srcOff+3] + } + } + }) + + return dst +} + +func normalizeKernel(kernel []float64) { + var sum, sumpos float64 + 
for i := range kernel { + sum += kernel[i] + if kernel[i] > 0 { + sumpos += kernel[i] + } + } + if sum != 0 { + for i := range kernel { + kernel[i] /= sum + } + } else if sumpos != 0 { + for i := range kernel { + kernel[i] /= sumpos + } + } +} diff --git a/vendor/github.com/disintegration/imaging/doc.go b/vendor/github.com/disintegration/imaging/doc.go new file mode 100644 index 0000000..c98c912 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/doc.go @@ -0,0 +1,7 @@ +/* +Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.). + +All the image processing functions provided by the package accept any image type that implements image.Image interface +as an input, and return a new image of *image.NRGBA type (32bit RGBA colors, non-premultiplied alpha). +*/ +package imaging diff --git a/vendor/github.com/disintegration/imaging/effects.go b/vendor/github.com/disintegration/imaging/effects.go new file mode 100644 index 0000000..47316b7 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/effects.go @@ -0,0 +1,169 @@ +package imaging + +import ( + "image" + "math" +) + +func gaussianBlurKernel(x, sigma float64) float64 { + return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi)) +} + +// Blur produces a blurred version of the image using a Gaussian function. +// Sigma parameter must be positive and indicates how much the image will be blurred. +// +// Example: +// +// dstImage := imaging.Blur(srcImage, 3.5) +// +func Blur(img image.Image, sigma float64) *image.NRGBA { + if sigma <= 0 { + return Clone(img) + } + + radius := int(math.Ceil(sigma * 3.0)) + kernel := make([]float64, radius+1) + + for i := 0; i <= radius; i++ { + kernel[i] = gaussianBlurKernel(float64(i), sigma) + } + + return blurVertical(blurHorizontal(img, kernel), kernel) +} + +func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + radius := len(kernel) - 1 + + parallel(0, src.h, func(ys <-chan int) { + scanLine := make([]uint8, src.w*4) + scanLineF := make([]float64, len(scanLine)) + for y := range ys { + src.scan(0, y, src.w, y+1, scanLine) + for i, v := range scanLine { + scanLineF[i] = float64(v) + } + for x := 0; x < src.w; x++ { + min := x - radius + if min < 0 { + min = 0 + } + max := x + radius + if max > src.w-1 { + max = src.w - 1 + } + var r, g, b, a, wsum float64 + for ix := min; ix <= max; ix++ { + i := ix * 4 + weight := kernel[absint(x-ix)] + wsum += weight + s := scanLineF[i : i+4 : i+4] + wa := s[3] * weight + r += s[0] * wa + g += s[1] * wa + b += s[2] * wa + a += wa + } + if a != 0 { + aInv := 1 / a + j := y*dst.Stride + x*4 + d := dst.Pix[j : j+4 : j+4] + d[0] = clamp(r * aInv) + d[1] = clamp(g * aInv) + d[2] = clamp(b * aInv) + d[3] = clamp(a / wsum) + } + } + } + }) + + return dst +} + +func blurVertical(img image.Image, kernel []float64) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + radius := len(kernel) - 1 + + parallel(0, src.w, func(xs <-chan int) { + scanLine := make([]uint8, src.h*4) + scanLineF := make([]float64, len(scanLine)) + for x := range xs { + src.scan(x, 0, x+1, src.h, scanLine) + for i, v := range scanLine { + scanLineF[i] = float64(v) + } + for y := 0; y < src.h; y++ { + min := y - radius + if min < 0 { + min = 0 + } + max := y + radius + if max > src.h-1 { + max = src.h - 1 + } + var r, g, b, a, wsum float64 + for iy := min; iy <= max; iy++ { + i 
:= iy * 4 + weight := kernel[absint(y-iy)] + wsum += weight + s := scanLineF[i : i+4 : i+4] + wa := s[3] * weight + r += s[0] * wa + g += s[1] * wa + b += s[2] * wa + a += wa + } + if a != 0 { + aInv := 1 / a + j := y*dst.Stride + x*4 + d := dst.Pix[j : j+4 : j+4] + d[0] = clamp(r * aInv) + d[1] = clamp(g * aInv) + d[2] = clamp(b * aInv) + d[3] = clamp(a / wsum) + } + } + } + }) + + return dst +} + +// Sharpen produces a sharpened version of the image. +// Sigma parameter must be positive and indicates how much the image will be sharpened. +// +// Example: +// +// dstImage := imaging.Sharpen(srcImage, 3.5) +// +func Sharpen(img image.Image, sigma float64) *image.NRGBA { + if sigma <= 0 { + return Clone(img) + } + + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + blurred := Blur(img, sigma) + + parallel(0, src.h, func(ys <-chan int) { + scanLine := make([]uint8, src.w*4) + for y := range ys { + src.scan(0, y, src.w, y+1, scanLine) + j := y * dst.Stride + for i := 0; i < src.w*4; i++ { + val := int(scanLine[i])<<1 - int(blurred.Pix[j]) + if val < 0 { + val = 0 + } else if val > 0xff { + val = 0xff + } + dst.Pix[j] = uint8(val) + j++ + } + } + }) + + return dst +} diff --git a/vendor/github.com/disintegration/imaging/go.mod b/vendor/github.com/disintegration/imaging/go.mod new file mode 100644 index 0000000..a870810 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/go.mod @@ -0,0 +1,3 @@ +module github.com/disintegration/imaging + +require golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 diff --git a/vendor/github.com/disintegration/imaging/go.sum b/vendor/github.com/disintegration/imaging/go.sum new file mode 100644 index 0000000..17bf738 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/go.sum @@ -0,0 +1,3 @@ +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/disintegration/imaging/histogram.go b/vendor/github.com/disintegration/imaging/histogram.go new file mode 100644 index 0000000..c547fe8 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/histogram.go @@ -0,0 +1,52 @@ +package imaging + +import ( + "image" + "sync" +) + +// Histogram returns a normalized histogram of an image. +// +// Resulting histogram is represented as an array of 256 floats, where +// histogram[i] is a probability of a pixel being of a particular luminance i. 
+func Histogram(img image.Image) [256]float64 { + var mu sync.Mutex + var histogram [256]float64 + var total float64 + + src := newScanner(img) + if src.w == 0 || src.h == 0 { + return histogram + } + + parallel(0, src.h, func(ys <-chan int) { + var tmpHistogram [256]float64 + var tmpTotal float64 + scanLine := make([]uint8, src.w*4) + for y := range ys { + src.scan(0, y, src.w, y+1, scanLine) + i := 0 + for x := 0; x < src.w; x++ { + s := scanLine[i : i+3 : i+3] + r := s[0] + g := s[1] + b := s[2] + y := 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b) + tmpHistogram[int(y+0.5)]++ + tmpTotal++ + i += 4 + } + } + mu.Lock() + for i := 0; i < 256; i++ { + histogram[i] += tmpHistogram[i] + } + total += tmpTotal + mu.Unlock() + }) + + for i := 0; i < 256; i++ { + histogram[i] = histogram[i] / total + } + return histogram +} diff --git a/vendor/github.com/disintegration/imaging/io.go b/vendor/github.com/disintegration/imaging/io.go new file mode 100644 index 0000000..f6c6da8 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/io.go @@ -0,0 +1,444 @@ +package imaging + +import ( + "encoding/binary" + "errors" + "image" + "image/draw" + "image/gif" + "image/jpeg" + "image/png" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/image/bmp" + "golang.org/x/image/tiff" +) + +type fileSystem interface { + Create(string) (io.WriteCloser, error) + Open(string) (io.ReadCloser, error) +} + +type localFS struct{} + +func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) } +func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) } + +var fs fileSystem = localFS{} + +type decodeConfig struct { + autoOrientation bool +} + +var defaultDecodeConfig = decodeConfig{ + autoOrientation: false, +} + +// DecodeOption sets an optional parameter for the Decode and Open functions. +type DecodeOption func(*decodeConfig) + +// AutoOrientation returns a DecodeOption that sets the auto-orientation mode. +// If auto-orientation is enabled, the image will be transformed after decoding +// according to the EXIF orientation tag (if present). By default it's disabled. +func AutoOrientation(enabled bool) DecodeOption { + return func(c *decodeConfig) { + c.autoOrientation = enabled + } +} + +// Decode reads an image from r. +func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) { + cfg := defaultDecodeConfig + for _, option := range opts { + option(&cfg) + } + + if !cfg.autoOrientation { + img, _, err := image.Decode(r) + return img, err + } + + var orient orientation + pr, pw := io.Pipe() + r = io.TeeReader(r, pw) + done := make(chan struct{}) + go func() { + defer close(done) + orient = readOrientation(pr) + io.Copy(ioutil.Discard, pr) + }() + + img, _, err := image.Decode(r) + pw.Close() + <-done + if err != nil { + return nil, err + } + + return fixOrientation(img, orient), nil +} + +// Open loads an image from file. +// +// Examples: +// +// // Load an image from file. +// img, err := imaging.Open("test.jpg") +// +// // Load an image and transform it depending on the EXIF orientation tag (if present). +// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true)) +// +func Open(filename string, opts ...DecodeOption) (image.Image, error) { + file, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + return Decode(file, opts...) +} + +// Format is an image file format. +type Format int + +// Image file formats. 
+const ( + JPEG Format = iota + PNG + GIF + TIFF + BMP +) + +var formatExts = map[string]Format{ + "jpg": JPEG, + "jpeg": JPEG, + "png": PNG, + "gif": GIF, + "tif": TIFF, + "tiff": TIFF, + "bmp": BMP, +} + +var formatNames = map[Format]string{ + JPEG: "JPEG", + PNG: "PNG", + GIF: "GIF", + TIFF: "TIFF", + BMP: "BMP", +} + +func (f Format) String() string { + return formatNames[f] +} + +// ErrUnsupportedFormat means the given image format is not supported. +var ErrUnsupportedFormat = errors.New("imaging: unsupported image format") + +// FormatFromExtension parses image format from filename extension: +// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported. +func FormatFromExtension(ext string) (Format, error) { + if f, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok { + return f, nil + } + return -1, ErrUnsupportedFormat +} + +// FormatFromFilename parses image format from filename: +// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported. +func FormatFromFilename(filename string) (Format, error) { + ext := filepath.Ext(filename) + return FormatFromExtension(ext) +} + +type encodeConfig struct { + jpegQuality int + gifNumColors int + gifQuantizer draw.Quantizer + gifDrawer draw.Drawer + pngCompressionLevel png.CompressionLevel +} + +var defaultEncodeConfig = encodeConfig{ + jpegQuality: 95, + gifNumColors: 256, + gifQuantizer: nil, + gifDrawer: nil, + pngCompressionLevel: png.DefaultCompression, +} + +// EncodeOption sets an optional parameter for the Encode and Save functions. +type EncodeOption func(*encodeConfig) + +// JPEGQuality returns an EncodeOption that sets the output JPEG quality. +// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95. +func JPEGQuality(quality int) EncodeOption { + return func(c *encodeConfig) { + c.jpegQuality = quality + } +} + +// GIFNumColors returns an EncodeOption that sets the maximum number of colors +// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256. +func GIFNumColors(numColors int) EncodeOption { + return func(c *encodeConfig) { + c.gifNumColors = numColors + } +} + +// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce +// a palette of the GIF-encoded image. +func GIFQuantizer(quantizer draw.Quantizer) EncodeOption { + return func(c *encodeConfig) { + c.gifQuantizer = quantizer + } +} + +// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert +// the source image to the desired palette of the GIF-encoded image. +func GIFDrawer(drawer draw.Drawer) EncodeOption { + return func(c *encodeConfig) { + c.gifDrawer = drawer + } +} + +// PNGCompressionLevel returns an EncodeOption that sets the compression level +// of the PNG-encoded image. Default is png.DefaultCompression. +func PNGCompressionLevel(level png.CompressionLevel) EncodeOption { + return func(c *encodeConfig) { + c.pngCompressionLevel = level + } +} + +// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP). 
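+//
+// Example (illustrative; img is any image.Image):
+//
+//	var buf bytes.Buffer
+//	err := imaging.Encode(&buf, img, imaging.JPEG, imaging.JPEGQuality(80))
+//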
+func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error { + cfg := defaultEncodeConfig + for _, option := range opts { + option(&cfg) + } + + switch format { + case JPEG: + if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() { + rgba := &image.RGBA{ + Pix: nrgba.Pix, + Stride: nrgba.Stride, + Rect: nrgba.Rect, + } + return jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality}) + } + return jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality}) + + case PNG: + encoder := png.Encoder{CompressionLevel: cfg.pngCompressionLevel} + return encoder.Encode(w, img) + + case GIF: + return gif.Encode(w, img, &gif.Options{ + NumColors: cfg.gifNumColors, + Quantizer: cfg.gifQuantizer, + Drawer: cfg.gifDrawer, + }) + + case TIFF: + return tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true}) + + case BMP: + return bmp.Encode(w, img) + } + + return ErrUnsupportedFormat +} + +// Save saves the image to file with the specified filename. +// The format is determined from the filename extension: +// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported. +// +// Examples: +// +// // Save the image as PNG. +// err := imaging.Save(img, "out.png") +// +// // Save the image as JPEG with optional quality parameter set to 80. +// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80)) +// +func Save(img image.Image, filename string, opts ...EncodeOption) (err error) { + f, err := FormatFromFilename(filename) + if err != nil { + return err + } + file, err := fs.Create(filename) + if err != nil { + return err + } + err = Encode(file, img, f, opts...) + errc := file.Close() + if err == nil { + err = errc + } + return err +} + +// orientation is an EXIF flag that specifies the transformation +// that should be applied to image to display it correctly. +type orientation int + +const ( + orientationUnspecified = 0 + orientationNormal = 1 + orientationFlipH = 2 + orientationRotate180 = 3 + orientationFlipV = 4 + orientationTranspose = 5 + orientationRotate270 = 6 + orientationTransverse = 7 + orientationRotate90 = 8 +) + +// readOrientation tries to read the orientation EXIF flag from image data in r. +// If the EXIF data block is not found or the orientation flag is not found +// or any other error occures while reading the data, it returns the +// orientationUnspecified (0) value. +func readOrientation(r io.Reader) orientation { + const ( + markerSOI = 0xffd8 + markerAPP1 = 0xffe1 + exifHeader = 0x45786966 + byteOrderBE = 0x4d4d + byteOrderLE = 0x4949 + orientationTag = 0x0112 + ) + + // Check if JPEG SOI marker is present. + var soi uint16 + if err := binary.Read(r, binary.BigEndian, &soi); err != nil { + return orientationUnspecified + } + if soi != markerSOI { + return orientationUnspecified // Missing JPEG SOI marker. + } + + // Find JPEG APP1 marker. + for { + var marker, size uint16 + if err := binary.Read(r, binary.BigEndian, &marker); err != nil { + return orientationUnspecified + } + if err := binary.Read(r, binary.BigEndian, &size); err != nil { + return orientationUnspecified + } + if marker>>8 != 0xff { + return orientationUnspecified // Invalid JPEG marker. + } + if marker == markerAPP1 { + break + } + if size < 2 { + return orientationUnspecified // Invalid block size. + } + if _, err := io.CopyN(ioutil.Discard, r, int64(size-2)); err != nil { + return orientationUnspecified + } + } + + // Check if EXIF header is present. 
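+	// The APP1 payload starts with the ASCII bytes "Exif" followed by two zero
+	// bytes, then a TIFF header: a 2-byte byte-order mark ("II" or "MM"), a
+	// 2-byte magic number and a 4-byte offset to the first IFD, which holds
+	// the orientation tag. The reads below follow that layout.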
+ var header uint32 + if err := binary.Read(r, binary.BigEndian, &header); err != nil { + return orientationUnspecified + } + if header != exifHeader { + return orientationUnspecified + } + if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil { + return orientationUnspecified + } + + // Read byte order information. + var ( + byteOrderTag uint16 + byteOrder binary.ByteOrder + ) + if err := binary.Read(r, binary.BigEndian, &byteOrderTag); err != nil { + return orientationUnspecified + } + switch byteOrderTag { + case byteOrderBE: + byteOrder = binary.BigEndian + case byteOrderLE: + byteOrder = binary.LittleEndian + default: + return orientationUnspecified // Invalid byte order flag. + } + if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil { + return orientationUnspecified + } + + // Skip the EXIF offset. + var offset uint32 + if err := binary.Read(r, byteOrder, &offset); err != nil { + return orientationUnspecified + } + if offset < 8 { + return orientationUnspecified // Invalid offset value. + } + if _, err := io.CopyN(ioutil.Discard, r, int64(offset-8)); err != nil { + return orientationUnspecified + } + + // Read the number of tags. + var numTags uint16 + if err := binary.Read(r, byteOrder, &numTags); err != nil { + return orientationUnspecified + } + + // Find the orientation tag. + for i := 0; i < int(numTags); i++ { + var tag uint16 + if err := binary.Read(r, byteOrder, &tag); err != nil { + return orientationUnspecified + } + if tag != orientationTag { + if _, err := io.CopyN(ioutil.Discard, r, 10); err != nil { + return orientationUnspecified + } + continue + } + if _, err := io.CopyN(ioutil.Discard, r, 6); err != nil { + return orientationUnspecified + } + var val uint16 + if err := binary.Read(r, byteOrder, &val); err != nil { + return orientationUnspecified + } + if val < 1 || val > 8 { + return orientationUnspecified // Invalid tag value. + } + return orientation(val) + } + return orientationUnspecified // Missing orientation tag. +} + +// fixOrientation applies a transform to img corresponding to the given orientation flag. 
+func fixOrientation(img image.Image, o orientation) image.Image { + switch o { + case orientationNormal: + case orientationFlipH: + img = FlipH(img) + case orientationFlipV: + img = FlipV(img) + case orientationRotate90: + img = Rotate90(img) + case orientationRotate180: + img = Rotate180(img) + case orientationRotate270: + img = Rotate270(img) + case orientationTranspose: + img = Transpose(img) + case orientationTransverse: + img = Transverse(img) + } + return img +} diff --git a/vendor/github.com/disintegration/imaging/resize.go b/vendor/github.com/disintegration/imaging/resize.go new file mode 100644 index 0000000..706435e --- /dev/null +++ b/vendor/github.com/disintegration/imaging/resize.go @@ -0,0 +1,595 @@ +package imaging + +import ( + "image" + "math" +) + +type indexWeight struct { + index int + weight float64 +} + +func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWeight { + du := float64(srcSize) / float64(dstSize) + scale := du + if scale < 1.0 { + scale = 1.0 + } + ru := math.Ceil(scale * filter.Support) + + out := make([][]indexWeight, dstSize) + tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2) + + for v := 0; v < dstSize; v++ { + fu := (float64(v)+0.5)*du - 0.5 + + begin := int(math.Ceil(fu - ru)) + if begin < 0 { + begin = 0 + } + end := int(math.Floor(fu + ru)) + if end > srcSize-1 { + end = srcSize - 1 + } + + var sum float64 + for u := begin; u <= end; u++ { + w := filter.Kernel((float64(u) - fu) / scale) + if w != 0 { + sum += w + tmp = append(tmp, indexWeight{index: u, weight: w}) + } + } + if sum != 0 { + for i := range tmp { + tmp[i].weight /= sum + } + } + + out[v] = tmp + tmp = tmp[len(tmp):] + } + + return out +} + +// Resize resizes the image to the specified width and height using the specified resampling +// filter and returns the transformed image. If one of width or height is 0, the image aspect +// ratio is preserved. +// +// Example: +// +// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos) +// +func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { + dstW, dstH := width, height + if dstW < 0 || dstH < 0 { + return &image.NRGBA{} + } + if dstW == 0 && dstH == 0 { + return &image.NRGBA{} + } + + srcW := img.Bounds().Dx() + srcH := img.Bounds().Dy() + if srcW <= 0 || srcH <= 0 { + return &image.NRGBA{} + } + + // If new width or height is 0 then preserve aspect ratio, minimum 1px. + if dstW == 0 { + tmpW := float64(dstH) * float64(srcW) / float64(srcH) + dstW = int(math.Max(1.0, math.Floor(tmpW+0.5))) + } + if dstH == 0 { + tmpH := float64(dstW) * float64(srcH) / float64(srcW) + dstH = int(math.Max(1.0, math.Floor(tmpH+0.5))) + } + + if filter.Support <= 0 { + // Nearest-neighbor special case. 
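+		// (A filter with non-positive Support, such as the predefined
+		// NearestNeighbor, has no kernel to evaluate, so plain point sampling
+		// is used instead of the weighted resize below.)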
+ return resizeNearest(img, dstW, dstH) + } + + if srcW != dstW && srcH != dstH { + return resizeVertical(resizeHorizontal(img, dstW, filter), dstH, filter) + } + if srcW != dstW { + return resizeHorizontal(img, dstW, filter) + } + if srcH != dstH { + return resizeVertical(img, dstH, filter) + } + return Clone(img) +} + +func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, width, src.h)) + weights := precomputeWeights(width, src.w, filter) + parallel(0, src.h, func(ys <-chan int) { + scanLine := make([]uint8, src.w*4) + for y := range ys { + src.scan(0, y, src.w, y+1, scanLine) + j0 := y * dst.Stride + for x := range weights { + var r, g, b, a float64 + for _, w := range weights[x] { + i := w.index * 4 + s := scanLine[i : i+4 : i+4] + aw := float64(s[3]) * w.weight + r += float64(s[0]) * aw + g += float64(s[1]) * aw + b += float64(s[2]) * aw + a += aw + } + if a != 0 { + aInv := 1 / a + j := j0 + x*4 + d := dst.Pix[j : j+4 : j+4] + d[0] = clamp(r * aInv) + d[1] = clamp(g * aInv) + d[2] = clamp(b * aInv) + d[3] = clamp(a) + } + } + } + }) + return dst +} + +func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, height)) + weights := precomputeWeights(height, src.h, filter) + parallel(0, src.w, func(xs <-chan int) { + scanLine := make([]uint8, src.h*4) + for x := range xs { + src.scan(x, 0, x+1, src.h, scanLine) + for y := range weights { + var r, g, b, a float64 + for _, w := range weights[y] { + i := w.index * 4 + s := scanLine[i : i+4 : i+4] + aw := float64(s[3]) * w.weight + r += float64(s[0]) * aw + g += float64(s[1]) * aw + b += float64(s[2]) * aw + a += aw + } + if a != 0 { + aInv := 1 / a + j := y*dst.Stride + x*4 + d := dst.Pix[j : j+4 : j+4] + d[0] = clamp(r * aInv) + d[1] = clamp(g * aInv) + d[2] = clamp(b * aInv) + d[3] = clamp(a) + } + } + } + }) + return dst +} + +// resizeNearest is a fast nearest-neighbor resize, no filtering. +func resizeNearest(img image.Image, width, height int) *image.NRGBA { + dst := image.NewNRGBA(image.Rect(0, 0, width, height)) + dx := float64(img.Bounds().Dx()) / float64(width) + dy := float64(img.Bounds().Dy()) / float64(height) + + if dx > 1 && dy > 1 { + src := newScanner(img) + parallel(0, height, func(ys <-chan int) { + for y := range ys { + srcY := int((float64(y) + 0.5) * dy) + dstOff := y * dst.Stride + for x := 0; x < width; x++ { + srcX := int((float64(x) + 0.5) * dx) + src.scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+4]) + dstOff += 4 + } + } + }) + } else { + src := toNRGBA(img) + parallel(0, height, func(ys <-chan int) { + for y := range ys { + srcY := int((float64(y) + 0.5) * dy) + srcOff0 := srcY * src.Stride + dstOff := y * dst.Stride + for x := 0; x < width; x++ { + srcX := int((float64(x) + 0.5) * dx) + srcOff := srcOff0 + srcX*4 + copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4]) + dstOff += 4 + } + } + }) + } + + return dst +} + +// Fit scales down the image using the specified resample filter to fit the specified +// maximum width and height and returns the transformed image. 
+// +// Example: +// +// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos) +// +func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { + maxW, maxH := width, height + + if maxW <= 0 || maxH <= 0 { + return &image.NRGBA{} + } + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + + if srcW <= 0 || srcH <= 0 { + return &image.NRGBA{} + } + + if srcW <= maxW && srcH <= maxH { + return Clone(img) + } + + srcAspectRatio := float64(srcW) / float64(srcH) + maxAspectRatio := float64(maxW) / float64(maxH) + + var newW, newH int + if srcAspectRatio > maxAspectRatio { + newW = maxW + newH = int(float64(newW) / srcAspectRatio) + } else { + newH = maxH + newW = int(float64(newH) * srcAspectRatio) + } + + return Resize(img, newW, newH, filter) +} + +// Fill creates an image with the specified dimensions and fills it with the scaled source image. +// To achieve the correct aspect ratio without stretching, the source image will be cropped. +// +// Example: +// +// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos) +// +func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA { + dstW, dstH := width, height + + if dstW <= 0 || dstH <= 0 { + return &image.NRGBA{} + } + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + + if srcW <= 0 || srcH <= 0 { + return &image.NRGBA{} + } + + if srcW == dstW && srcH == dstH { + return Clone(img) + } + + if srcW >= 100 && srcH >= 100 { + return cropAndResize(img, dstW, dstH, anchor, filter) + } + return resizeAndCrop(img, dstW, dstH, anchor, filter) +} + +// cropAndResize crops the image to the smallest possible size that has the required aspect ratio using +// the given anchor point, then scales it to the specified dimensions and returns the transformed image. +// +// This is generally faster than resizing first, but may result in inaccuracies when used on small source images. +func cropAndResize(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA { + dstW, dstH := width, height + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + srcAspectRatio := float64(srcW) / float64(srcH) + dstAspectRatio := float64(dstW) / float64(dstH) + + var tmp *image.NRGBA + if srcAspectRatio < dstAspectRatio { + cropH := float64(srcW) * float64(dstH) / float64(dstW) + tmp = CropAnchor(img, srcW, int(math.Max(1, cropH)+0.5), anchor) + } else { + cropW := float64(srcH) * float64(dstW) / float64(dstH) + tmp = CropAnchor(img, int(math.Max(1, cropW)+0.5), srcH, anchor) + } + + return Resize(tmp, dstW, dstH, filter) +} + +// resizeAndCrop resizes the image to the smallest possible size that will cover the specified dimensions, +// crops the resized image to the specified dimensions using the given anchor point and returns +// the transformed image. 
+func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA { + dstW, dstH := width, height + + srcBounds := img.Bounds() + srcW := srcBounds.Dx() + srcH := srcBounds.Dy() + srcAspectRatio := float64(srcW) / float64(srcH) + dstAspectRatio := float64(dstW) / float64(dstH) + + var tmp *image.NRGBA + if srcAspectRatio < dstAspectRatio { + tmp = Resize(img, dstW, 0, filter) + } else { + tmp = Resize(img, 0, dstH, filter) + } + + return CropAnchor(tmp, dstW, dstH, anchor) +} + +// Thumbnail scales the image up or down using the specified resample filter, crops it +// to the specified width and hight and returns the transformed image. +// +// Example: +// +// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos) +// +func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA { + return Fill(img, width, height, Center, filter) +} + +// ResampleFilter specifies a resampling filter to be used for image resizing. +// +// General filter recommendations: +// +// - Lanczos +// A high-quality resampling filter for photographic images yielding sharp results. +// +// - CatmullRom +// A sharp cubic filter that is faster than Lanczos filter while providing similar results. +// +// - MitchellNetravali +// A cubic filter that produces smoother results with less ringing artifacts than CatmullRom. +// +// - Linear +// Bilinear resampling filter, produces a smooth output. Faster than cubic filters. +// +// - Box +// Simple and fast averaging filter appropriate for downscaling. +// When upscaling it's similar to NearestNeighbor. +// +// - NearestNeighbor +// Fastest resampling filter, no antialiasing. +// +type ResampleFilter struct { + Support float64 + Kernel func(float64) float64 +} + +// NearestNeighbor is a nearest-neighbor filter (no anti-aliasing). +var NearestNeighbor ResampleFilter + +// Box filter (averaging pixels). +var Box ResampleFilter + +// Linear filter. +var Linear ResampleFilter + +// Hermite cubic spline filter (BC-spline; B=0; C=0). +var Hermite ResampleFilter + +// MitchellNetravali is Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3). +var MitchellNetravali ResampleFilter + +// CatmullRom is a Catmull-Rom - sharp cubic filter (BC-spline; B=0; C=0.5). +var CatmullRom ResampleFilter + +// BSpline is a smooth cubic filter (BC-spline; B=1; C=0). +var BSpline ResampleFilter + +// Gaussian is a Gaussian blurring filter. +var Gaussian ResampleFilter + +// Bartlett is a Bartlett-windowed sinc filter (3 lobes). +var Bartlett ResampleFilter + +// Lanczos filter (3 lobes). +var Lanczos ResampleFilter + +// Hann is a Hann-windowed sinc filter (3 lobes). +var Hann ResampleFilter + +// Hamming is a Hamming-windowed sinc filter (3 lobes). +var Hamming ResampleFilter + +// Blackman is a Blackman-windowed sinc filter (3 lobes). +var Blackman ResampleFilter + +// Welch is a Welch-windowed sinc filter (parabolic window, 3 lobes). +var Welch ResampleFilter + +// Cosine is a Cosine-windowed sinc filter (3 lobes). 
+var Cosine ResampleFilter + +func bcspline(x, b, c float64) float64 { + var y float64 + x = math.Abs(x) + if x < 1.0 { + y = ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6 + } else if x < 2.0 { + y = ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6 + } + return y +} + +func sinc(x float64) float64 { + if x == 0 { + return 1 + } + return math.Sin(math.Pi*x) / (math.Pi * x) +} + +func init() { + NearestNeighbor = ResampleFilter{ + Support: 0.0, // special case - not applying the filter + } + + Box = ResampleFilter{ + Support: 0.5, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x <= 0.5 { + return 1.0 + } + return 0 + }, + } + + Linear = ResampleFilter{ + Support: 1.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 1.0 { + return 1.0 - x + } + return 0 + }, + } + + Hermite = ResampleFilter{ + Support: 1.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 1.0 { + return bcspline(x, 0.0, 0.0) + } + return 0 + }, + } + + MitchellNetravali = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return bcspline(x, 1.0/3.0, 1.0/3.0) + } + return 0 + }, + } + + CatmullRom = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return bcspline(x, 0.0, 0.5) + } + return 0 + }, + } + + BSpline = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return bcspline(x, 1.0, 0.0) + } + return 0 + }, + } + + Gaussian = ResampleFilter{ + Support: 2.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 2.0 { + return math.Exp(-2 * x * x) + } + return 0 + }, + } + + Bartlett = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (3.0 - x) / 3.0 + } + return 0 + }, + } + + Lanczos = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * sinc(x/3.0) + } + return 0 + }, + } + + Hann = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0)) + } + return 0 + }, + } + + Hamming = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0)) + } + return 0 + }, + } + + Blackman = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0)) + } + return 0 + }, + } + + Welch = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * (1.0 - (x * x / 9.0)) + } + return 0 + }, + } + + Cosine = ResampleFilter{ + Support: 3.0, + Kernel: func(x float64) float64 { + x = math.Abs(x) + if x < 3.0 { + return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0)) + } + return 0 + }, + } +} diff --git a/vendor/github.com/disintegration/imaging/scanner.go b/vendor/github.com/disintegration/imaging/scanner.go new file mode 100644 index 0000000..37d92ce --- /dev/null +++ b/vendor/github.com/disintegration/imaging/scanner.go @@ -0,0 +1,285 @@ +package imaging + +import ( + "image" + "image/color" +) + +type scanner struct { + image image.Image + w, h int + palette []color.NRGBA +} + +func newScanner(img image.Image) *scanner { + s := &scanner{ + image: img, + w: img.Bounds().Dx(), + h: 
img.Bounds().Dy(), + } + if img, ok := img.(*image.Paletted); ok { + s.palette = make([]color.NRGBA, len(img.Palette)) + for i := 0; i < len(img.Palette); i++ { + s.palette[i] = color.NRGBAModel.Convert(img.Palette[i]).(color.NRGBA) + } + } + return s +} + +// scan scans the given rectangular region of the image into dst. +func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) { + switch img := s.image.(type) { + case *image.NRGBA: + size := (x2 - x1) * 4 + j := 0 + i := y1*img.Stride + x1*4 + if size == 4 { + for y := y1; y < y2; y++ { + d := dst[j : j+4 : j+4] + s := img.Pix[i : i+4 : i+4] + d[0] = s[0] + d[1] = s[1] + d[2] = s[2] + d[3] = s[3] + j += size + i += img.Stride + } + } else { + for y := y1; y < y2; y++ { + copy(dst[j:j+size], img.Pix[i:i+size]) + j += size + i += img.Stride + } + } + + case *image.NRGBA64: + j := 0 + for y := y1; y < y2; y++ { + i := y*img.Stride + x1*8 + for x := x1; x < x2; x++ { + s := img.Pix[i : i+8 : i+8] + d := dst[j : j+4 : j+4] + d[0] = s[0] + d[1] = s[2] + d[2] = s[4] + d[3] = s[6] + j += 4 + i += 8 + } + } + + case *image.RGBA: + j := 0 + for y := y1; y < y2; y++ { + i := y*img.Stride + x1*4 + for x := x1; x < x2; x++ { + d := dst[j : j+4 : j+4] + a := img.Pix[i+3] + switch a { + case 0: + d[0] = 0 + d[1] = 0 + d[2] = 0 + d[3] = a + case 0xff: + s := img.Pix[i : i+4 : i+4] + d[0] = s[0] + d[1] = s[1] + d[2] = s[2] + d[3] = a + default: + s := img.Pix[i : i+4 : i+4] + r16 := uint16(s[0]) + g16 := uint16(s[1]) + b16 := uint16(s[2]) + a16 := uint16(a) + d[0] = uint8(r16 * 0xff / a16) + d[1] = uint8(g16 * 0xff / a16) + d[2] = uint8(b16 * 0xff / a16) + d[3] = a + } + j += 4 + i += 4 + } + } + + case *image.RGBA64: + j := 0 + for y := y1; y < y2; y++ { + i := y*img.Stride + x1*8 + for x := x1; x < x2; x++ { + s := img.Pix[i : i+8 : i+8] + d := dst[j : j+4 : j+4] + a := s[6] + switch a { + case 0: + d[0] = 0 + d[1] = 0 + d[2] = 0 + case 0xff: + d[0] = s[0] + d[1] = s[2] + d[2] = s[4] + default: + r32 := uint32(s[0])<<8 | uint32(s[1]) + g32 := uint32(s[2])<<8 | uint32(s[3]) + b32 := uint32(s[4])<<8 | uint32(s[5]) + a32 := uint32(s[6])<<8 | uint32(s[7]) + d[0] = uint8((r32 * 0xffff / a32) >> 8) + d[1] = uint8((g32 * 0xffff / a32) >> 8) + d[2] = uint8((b32 * 0xffff / a32) >> 8) + } + d[3] = a + j += 4 + i += 8 + } + } + + case *image.Gray: + j := 0 + for y := y1; y < y2; y++ { + i := y*img.Stride + x1 + for x := x1; x < x2; x++ { + c := img.Pix[i] + d := dst[j : j+4 : j+4] + d[0] = c + d[1] = c + d[2] = c + d[3] = 0xff + j += 4 + i++ + } + } + + case *image.Gray16: + j := 0 + for y := y1; y < y2; y++ { + i := y*img.Stride + x1*2 + for x := x1; x < x2; x++ { + c := img.Pix[i] + d := dst[j : j+4 : j+4] + d[0] = c + d[1] = c + d[2] = c + d[3] = 0xff + j += 4 + i += 2 + } + } + + case *image.YCbCr: + j := 0 + x1 += img.Rect.Min.X + x2 += img.Rect.Min.X + y1 += img.Rect.Min.Y + y2 += img.Rect.Min.Y + + hy := img.Rect.Min.Y / 2 + hx := img.Rect.Min.X / 2 + for y := y1; y < y2; y++ { + iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X) + + var yBase int + switch img.SubsampleRatio { + case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422: + yBase = (y - img.Rect.Min.Y) * img.CStride + case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440: + yBase = (y/2 - hy) * img.CStride + } + + for x := x1; x < x2; x++ { + var ic int + switch img.SubsampleRatio { + case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440: + ic = yBase + (x - img.Rect.Min.X) + case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420: + ic = yBase + 
(x/2 - hx) + default: + ic = img.COffset(x, y) + } + + yy1 := int32(img.Y[iy]) * 0x10101 + cb1 := int32(img.Cb[ic]) - 128 + cr1 := int32(img.Cr[ic]) - 128 + + r := yy1 + 91881*cr1 + if uint32(r)&0xff000000 == 0 { + r >>= 16 + } else { + r = ^(r >> 31) + } + + g := yy1 - 22554*cb1 - 46802*cr1 + if uint32(g)&0xff000000 == 0 { + g >>= 16 + } else { + g = ^(g >> 31) + } + + b := yy1 + 116130*cb1 + if uint32(b)&0xff000000 == 0 { + b >>= 16 + } else { + b = ^(b >> 31) + } + + d := dst[j : j+4 : j+4] + d[0] = uint8(r) + d[1] = uint8(g) + d[2] = uint8(b) + d[3] = 0xff + + iy++ + j += 4 + } + } + + case *image.Paletted: + j := 0 + for y := y1; y < y2; y++ { + i := y*img.Stride + x1 + for x := x1; x < x2; x++ { + c := s.palette[img.Pix[i]] + d := dst[j : j+4 : j+4] + d[0] = c.R + d[1] = c.G + d[2] = c.B + d[3] = c.A + j += 4 + i++ + } + } + + default: + j := 0 + b := s.image.Bounds() + x1 += b.Min.X + x2 += b.Min.X + y1 += b.Min.Y + y2 += b.Min.Y + for y := y1; y < y2; y++ { + for x := x1; x < x2; x++ { + r16, g16, b16, a16 := s.image.At(x, y).RGBA() + d := dst[j : j+4 : j+4] + switch a16 { + case 0xffff: + d[0] = uint8(r16 >> 8) + d[1] = uint8(g16 >> 8) + d[2] = uint8(b16 >> 8) + d[3] = 0xff + case 0: + d[0] = 0 + d[1] = 0 + d[2] = 0 + d[3] = 0 + default: + d[0] = uint8(((r16 * 0xffff) / a16) >> 8) + d[1] = uint8(((g16 * 0xffff) / a16) >> 8) + d[2] = uint8(((b16 * 0xffff) / a16) >> 8) + d[3] = uint8(a16 >> 8) + } + j += 4 + } + } + } +} diff --git a/vendor/github.com/disintegration/imaging/tools.go b/vendor/github.com/disintegration/imaging/tools.go new file mode 100644 index 0000000..0ec19a0 --- /dev/null +++ b/vendor/github.com/disintegration/imaging/tools.go @@ -0,0 +1,249 @@ +package imaging + +import ( + "bytes" + "image" + "image/color" + "math" +) + +// New creates a new image with the specified width and height, and fills it with the specified color. +func New(width, height int, fillColor color.Color) *image.NRGBA { + if width <= 0 || height <= 0 { + return &image.NRGBA{} + } + + c := color.NRGBAModel.Convert(fillColor).(color.NRGBA) + if (c == color.NRGBA{0, 0, 0, 0}) { + return image.NewNRGBA(image.Rect(0, 0, width, height)) + } + + return &image.NRGBA{ + Pix: bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height), + Stride: 4 * width, + Rect: image.Rect(0, 0, width, height), + } +} + +// Clone returns a copy of the given image. +func Clone(img image.Image) *image.NRGBA { + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h)) + size := src.w * 4 + parallel(0, src.h, func(ys <-chan int) { + for y := range ys { + i := y * dst.Stride + src.scan(0, y, src.w, y+1, dst.Pix[i:i+size]) + } + }) + return dst +} + +// Anchor is the anchor point for image alignment. +type Anchor int + +// Anchor point positions. 
+const ( + Center Anchor = iota + TopLeft + Top + TopRight + Left + Right + BottomLeft + Bottom + BottomRight +) + +func anchorPt(b image.Rectangle, w, h int, anchor Anchor) image.Point { + var x, y int + switch anchor { + case TopLeft: + x = b.Min.X + y = b.Min.Y + case Top: + x = b.Min.X + (b.Dx()-w)/2 + y = b.Min.Y + case TopRight: + x = b.Max.X - w + y = b.Min.Y + case Left: + x = b.Min.X + y = b.Min.Y + (b.Dy()-h)/2 + case Right: + x = b.Max.X - w + y = b.Min.Y + (b.Dy()-h)/2 + case BottomLeft: + x = b.Min.X + y = b.Max.Y - h + case Bottom: + x = b.Min.X + (b.Dx()-w)/2 + y = b.Max.Y - h + case BottomRight: + x = b.Max.X - w + y = b.Max.Y - h + default: + x = b.Min.X + (b.Dx()-w)/2 + y = b.Min.Y + (b.Dy()-h)/2 + } + return image.Pt(x, y) +} + +// Crop cuts out a rectangular region with the specified bounds +// from the image and returns the cropped image. +func Crop(img image.Image, rect image.Rectangle) *image.NRGBA { + r := rect.Intersect(img.Bounds()).Sub(img.Bounds().Min) + if r.Empty() { + return &image.NRGBA{} + } + src := newScanner(img) + dst := image.NewNRGBA(image.Rect(0, 0, r.Dx(), r.Dy())) + rowSize := r.Dx() * 4 + parallel(r.Min.Y, r.Max.Y, func(ys <-chan int) { + for y := range ys { + i := (y - r.Min.Y) * dst.Stride + src.scan(r.Min.X, y, r.Max.X, y+1, dst.Pix[i:i+rowSize]) + } + }) + return dst +} + +// CropAnchor cuts out a rectangular region with the specified size +// from the image using the specified anchor point and returns the cropped image. +func CropAnchor(img image.Image, width, height int, anchor Anchor) *image.NRGBA { + srcBounds := img.Bounds() + pt := anchorPt(srcBounds, width, height, anchor) + r := image.Rect(0, 0, width, height).Add(pt) + b := srcBounds.Intersect(r) + return Crop(img, b) +} + +// CropCenter cuts out a rectangular region with the specified size +// from the center of the image and returns the cropped image. +func CropCenter(img image.Image, width, height int) *image.NRGBA { + return CropAnchor(img, width, height, Center) +} + +// Paste pastes the img image to the background image at the specified position and returns the combined image. +func Paste(background, img image.Image, pos image.Point) *image.NRGBA { + dst := Clone(background) + pos = pos.Sub(background.Bounds().Min) + pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())} + interRect := pasteRect.Intersect(dst.Bounds()) + if interRect.Empty() { + return dst + } + src := newScanner(img) + parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) { + for y := range ys { + x1 := interRect.Min.X - pasteRect.Min.X + x2 := interRect.Max.X - pasteRect.Min.X + y1 := y - pasteRect.Min.Y + y2 := y1 + 1 + i1 := y*dst.Stride + interRect.Min.X*4 + i2 := i1 + interRect.Dx()*4 + src.scan(x1, y1, x2, y2, dst.Pix[i1:i2]) + } + }) + return dst +} + +// PasteCenter pastes the img image to the center of the background image and returns the combined image. +func PasteCenter(background, img image.Image) *image.NRGBA { + bgBounds := background.Bounds() + bgW := bgBounds.Dx() + bgH := bgBounds.Dy() + bgMinX := bgBounds.Min.X + bgMinY := bgBounds.Min.Y + + centerX := bgMinX + bgW/2 + centerY := bgMinY + bgH/2 + + x0 := centerX - img.Bounds().Dx()/2 + y0 := centerY - img.Bounds().Dy()/2 + + return Paste(background, img, image.Pt(x0, y0)) +} + +// Overlay draws the img image over the background image at given position +// and returns the combined image. Opacity parameter is the opacity of the img +// image layer, used to compose the images, it must be from 0.0 to 1.0. 
+// +// Examples: +// +// // Draw spriteImage over backgroundImage at the given position (x=50, y=50). +// dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0) +// +// // Blend two opaque images of the same size. +// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5) +// +func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA { + opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0. + dst := Clone(background) + pos = pos.Sub(background.Bounds().Min) + pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())} + interRect := pasteRect.Intersect(dst.Bounds()) + if interRect.Empty() { + return dst + } + src := newScanner(img) + parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) { + scanLine := make([]uint8, interRect.Dx()*4) + for y := range ys { + x1 := interRect.Min.X - pasteRect.Min.X + x2 := interRect.Max.X - pasteRect.Min.X + y1 := y - pasteRect.Min.Y + y2 := y1 + 1 + src.scan(x1, y1, x2, y2, scanLine) + i := y*dst.Stride + interRect.Min.X*4 + j := 0 + for x := interRect.Min.X; x < interRect.Max.X; x++ { + d := dst.Pix[i : i+4 : i+4] + r1 := float64(d[0]) + g1 := float64(d[1]) + b1 := float64(d[2]) + a1 := float64(d[3]) + + s := scanLine[j : j+4 : j+4] + r2 := float64(s[0]) + g2 := float64(s[1]) + b2 := float64(s[2]) + a2 := float64(s[3]) + + coef2 := opacity * a2 / 255 + coef1 := (1 - coef2) * a1 / 255 + coefSum := coef1 + coef2 + coef1 /= coefSum + coef2 /= coefSum + + d[0] = uint8(r1*coef1 + r2*coef2) + d[1] = uint8(g1*coef1 + g2*coef2) + d[2] = uint8(b1*coef1 + b2*coef2) + d[3] = uint8(math.Min(a1+a2*opacity*(255-a1)/255, 255)) + + i += 4 + j += 4 + } + } + }) + return dst +} + +// OverlayCenter overlays the img image to the center of the background image and +// returns the combined image. Opacity parameter is the opacity of the img +// image layer, used to compose the images, it must be from 0.0 to 1.0. +func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA { + bgBounds := background.Bounds() + bgW := bgBounds.Dx() + bgH := bgBounds.Dy() + bgMinX := bgBounds.Min.X + bgMinY := bgBounds.Min.Y + + centerX := bgMinX + bgW/2 + centerY := bgMinY + bgH/2 + + x0 := centerX - img.Bounds().Dx()/2 + y0 := centerY - img.Bounds().Dy()/2 + + return Overlay(background, img, image.Point{x0, y0}, opacity) +} diff --git a/vendor/github.com/disintegration/imaging/transform.go b/vendor/github.com/disintegration/imaging/transform.go new file mode 100644 index 0000000..fe4a92f --- /dev/null +++ b/vendor/github.com/disintegration/imaging/transform.go @@ -0,0 +1,268 @@ +package imaging + +import ( + "image" + "image/color" + "math" +) + +// FlipH flips the image horizontally (from left to right) and returns the transformed image. +func FlipH(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.w + dstH := src.h + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcY := dstY + src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize]) + reverse(dst.Pix[i : i+rowSize]) + } + }) + return dst +} + +// FlipV flips the image vertically (from top to bottom) and returns the transformed image. 
+func FlipV(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.w + dstH := src.h + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcY := dstH - dstY - 1 + src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize]) + } + }) + return dst +} + +// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise. +func Transpose(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.h + dstH := src.w + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcX := dstY + src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize]) + } + }) + return dst +} + +// Transverse flips the image vertically and rotates 90 degrees counter-clockwise. +func Transverse(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.h + dstH := src.w + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcX := dstH - dstY - 1 + src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize]) + reverse(dst.Pix[i : i+rowSize]) + } + }) + return dst +} + +// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image. +func Rotate90(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.h + dstH := src.w + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcX := dstH - dstY - 1 + src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize]) + } + }) + return dst +} + +// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image. +func Rotate180(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.w + dstH := src.h + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcY := dstH - dstY - 1 + src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize]) + reverse(dst.Pix[i : i+rowSize]) + } + }) + return dst +} + +// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image. +func Rotate270(img image.Image) *image.NRGBA { + src := newScanner(img) + dstW := src.h + dstH := src.w + rowSize := dstW * 4 + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + i := dstY * dst.Stride + srcX := dstY + src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize]) + reverse(dst.Pix[i : i+rowSize]) + } + }) + return dst +} + +// Rotate rotates an image by the given angle counter-clockwise . +// The angle parameter is the rotation angle in degrees. +// The bgColor parameter specifies the color of the uncovered zone after the rotation. 
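+//
+// Example (illustrative):
+//
+//	dstImage := imaging.Rotate(srcImage, 45, color.Transparent)
+//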
+func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA { + angle = angle - math.Floor(angle/360)*360 + + switch angle { + case 0: + return Clone(img) + case 90: + return Rotate90(img) + case 180: + return Rotate180(img) + case 270: + return Rotate270(img) + } + + src := toNRGBA(img) + srcW := src.Bounds().Max.X + srcH := src.Bounds().Max.Y + dstW, dstH := rotatedSize(srcW, srcH, angle) + dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) + + if dstW <= 0 || dstH <= 0 { + return dst + } + + srcXOff := float64(srcW)/2 - 0.5 + srcYOff := float64(srcH)/2 - 0.5 + dstXOff := float64(dstW)/2 - 0.5 + dstYOff := float64(dstH)/2 - 0.5 + + bgColorNRGBA := color.NRGBAModel.Convert(bgColor).(color.NRGBA) + sin, cos := math.Sincos(math.Pi * angle / 180) + + parallel(0, dstH, func(ys <-chan int) { + for dstY := range ys { + for dstX := 0; dstX < dstW; dstX++ { + xf, yf := rotatePoint(float64(dstX)-dstXOff, float64(dstY)-dstYOff, sin, cos) + xf, yf = xf+srcXOff, yf+srcYOff + interpolatePoint(dst, dstX, dstY, src, xf, yf, bgColorNRGBA) + } + } + }) + + return dst +} + +func rotatePoint(x, y, sin, cos float64) (float64, float64) { + return x*cos - y*sin, x*sin + y*cos +} + +func rotatedSize(w, h int, angle float64) (int, int) { + if w <= 0 || h <= 0 { + return 0, 0 + } + + sin, cos := math.Sincos(math.Pi * angle / 180) + x1, y1 := rotatePoint(float64(w-1), 0, sin, cos) + x2, y2 := rotatePoint(float64(w-1), float64(h-1), sin, cos) + x3, y3 := rotatePoint(0, float64(h-1), sin, cos) + + minx := math.Min(x1, math.Min(x2, math.Min(x3, 0))) + maxx := math.Max(x1, math.Max(x2, math.Max(x3, 0))) + miny := math.Min(y1, math.Min(y2, math.Min(y3, 0))) + maxy := math.Max(y1, math.Max(y2, math.Max(y3, 0))) + + neww := maxx - minx + 1 + if neww-math.Floor(neww) > 0.1 { + neww++ + } + newh := maxy - miny + 1 + if newh-math.Floor(newh) > 0.1 { + newh++ + } + + return int(neww), int(newh) +} + +func interpolatePoint(dst *image.NRGBA, dstX, dstY int, src *image.NRGBA, xf, yf float64, bgColor color.NRGBA) { + j := dstY*dst.Stride + dstX*4 + d := dst.Pix[j : j+4 : j+4] + + x0 := int(math.Floor(xf)) + y0 := int(math.Floor(yf)) + bounds := src.Bounds() + if !image.Pt(x0, y0).In(image.Rect(bounds.Min.X-1, bounds.Min.Y-1, bounds.Max.X, bounds.Max.Y)) { + d[0] = bgColor.R + d[1] = bgColor.G + d[2] = bgColor.B + d[3] = bgColor.A + return + } + + xq := xf - float64(x0) + yq := yf - float64(y0) + points := [4]image.Point{ + {x0, y0}, + {x0 + 1, y0}, + {x0, y0 + 1}, + {x0 + 1, y0 + 1}, + } + weights := [4]float64{ + (1 - xq) * (1 - yq), + xq * (1 - yq), + (1 - xq) * yq, + xq * yq, + } + + var r, g, b, a float64 + for i := 0; i < 4; i++ { + p := points[i] + w := weights[i] + if p.In(bounds) { + i := p.Y*src.Stride + p.X*4 + s := src.Pix[i : i+4 : i+4] + wa := float64(s[3]) * w + r += float64(s[0]) * wa + g += float64(s[1]) * wa + b += float64(s[2]) * wa + a += wa + } else { + wa := float64(bgColor.A) * w + r += float64(bgColor.R) * wa + g += float64(bgColor.G) * wa + b += float64(bgColor.B) * wa + a += wa + } + } + if a != 0 { + aInv := 1 / a + d[0] = clamp(r * aInv) + d[1] = clamp(g * aInv) + d[2] = clamp(b * aInv) + d[3] = clamp(a) + } +} diff --git a/vendor/github.com/disintegration/imaging/utils.go b/vendor/github.com/disintegration/imaging/utils.go new file mode 100644 index 0000000..6c7af1a --- /dev/null +++ b/vendor/github.com/disintegration/imaging/utils.go @@ -0,0 +1,167 @@ +package imaging + +import ( + "image" + "math" + "runtime" + "sync" +) + +// parallel processes the data in separate goroutines. 
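+// It pushes the indices [start, stop) onto a buffered channel and starts up to
+// GOMAXPROCS worker goroutines, each calling fn with that shared channel.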
+func parallel(start, stop int, fn func(<-chan int)) { + count := stop - start + if count < 1 { + return + } + + procs := runtime.GOMAXPROCS(0) + if procs > count { + procs = count + } + + c := make(chan int, count) + for i := start; i < stop; i++ { + c <- i + } + close(c) + + var wg sync.WaitGroup + for i := 0; i < procs; i++ { + wg.Add(1) + go func() { + defer wg.Done() + fn(c) + }() + } + wg.Wait() +} + +// absint returns the absolute value of i. +func absint(i int) int { + if i < 0 { + return -i + } + return i +} + +// clamp rounds and clamps float64 value to fit into uint8. +func clamp(x float64) uint8 { + v := int64(x + 0.5) + if v > 255 { + return 255 + } + if v > 0 { + return uint8(v) + } + return 0 +} + +func reverse(pix []uint8) { + if len(pix) <= 4 { + return + } + i := 0 + j := len(pix) - 4 + for i < j { + pi := pix[i : i+4 : i+4] + pj := pix[j : j+4 : j+4] + pi[0], pj[0] = pj[0], pi[0] + pi[1], pj[1] = pj[1], pi[1] + pi[2], pj[2] = pj[2], pi[2] + pi[3], pj[3] = pj[3], pi[3] + i += 4 + j -= 4 + } +} + +func toNRGBA(img image.Image) *image.NRGBA { + if img, ok := img.(*image.NRGBA); ok { + return &image.NRGBA{ + Pix: img.Pix, + Stride: img.Stride, + Rect: img.Rect.Sub(img.Rect.Min), + } + } + return Clone(img) +} + +// rgbToHSL converts a color from RGB to HSL. +func rgbToHSL(r, g, b uint8) (float64, float64, float64) { + rr := float64(r) / 255 + gg := float64(g) / 255 + bb := float64(b) / 255 + + max := math.Max(rr, math.Max(gg, bb)) + min := math.Min(rr, math.Min(gg, bb)) + + l := (max + min) / 2 + + if max == min { + return 0, 0, l + } + + var h, s float64 + d := max - min + if l > 0.5 { + s = d / (2 - max - min) + } else { + s = d / (max + min) + } + + switch max { + case rr: + h = (gg - bb) / d + if g < b { + h += 6 + } + case gg: + h = (bb-rr)/d + 2 + case bb: + h = (rr-gg)/d + 4 + } + h /= 6 + + return h, s, l +} + +// hslToRGB converts a color from HSL to RGB. +func hslToRGB(h, s, l float64) (uint8, uint8, uint8) { + var r, g, b float64 + if s == 0 { + v := clamp(l * 255) + return v, v, v + } + + var q float64 + if l < 0.5 { + q = l * (1 + s) + } else { + q = l + s - l*s + } + p := 2*l - q + + r = hueToRGB(p, q, h+1/3.0) + g = hueToRGB(p, q, h) + b = hueToRGB(p, q, h-1/3.0) + + return clamp(r * 255), clamp(g * 255), clamp(b * 255) +} + +func hueToRGB(p, q, t float64) float64 { + if t < 0 { + t++ + } + if t > 1 { + t-- + } + if t < 1/6.0 { + return p + (q-p)*6*t + } + if t < 1/2.0 { + return q + } + if t < 2/3.0 { + return p + (q-p)*(2/3.0-t)*6 + } + return p +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 0000000..25fdaf6 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 0000000..d62e402 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,173 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! 
+red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 0000000..91c8e9f --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. 
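+// It writes the color's start sequence to Output; the Print helpers below
+// call it and defer the matching unset.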
+func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) 
+} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. 
+func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user setted action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. +func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) 
} + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) 
} + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) +} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 0000000..cf1e965 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,133 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. 
+ + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! + red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However only for color.SprintXXX functions, user should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of an existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/fatih/color/go.mod b/vendor/github.com/fatih/color/go.mod new file mode 100644 index 0000000..7887281 --- /dev/null +++ b/vendor/github.com/fatih/color/go.mod @@ -0,0 +1,8 @@ +module github.com/fatih/color + +go 1.13 + +require ( + github.com/mattn/go-colorable v0.1.8 + github.com/mattn/go-isatty v0.0.12 +) diff --git a/vendor/github.com/fatih/color/go.sum b/vendor/github.com/fatih/color/go.sum new file mode 100644 index 0000000..54f7c46 --- /dev/null +++ b/vendor/github.com/fatih/color/go.sum @@ -0,0 +1,7 @@ +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/francoispqt/gojay/.gitignore b/vendor/github.com/francoispqt/gojay/.gitignore new file mode 100644 index 0000000..43ebdc4 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/.gitignore @@ -0,0 +1,5 @@ +vendor +*.out +*.log +*.test +.vscode diff --git a/vendor/github.com/francoispqt/gojay/.travis.yml b/vendor/github.com/francoispqt/gojay/.travis.yml new file mode 100644 index 0000000..df04aa2 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - "1.10.x" + - "1.11.x" + - "1.12.x" + +script: + - go get github.com/golang/dep/cmd/dep github.com/stretchr/testify + - dep ensure -v -vendor-only + - go test ./gojay/codegen/test/... -race + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/francoispqt/gojay/Gopkg.lock b/vendor/github.com/francoispqt/gojay/Gopkg.lock new file mode 100644 index 0000000..d642e9a --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/Gopkg.lock @@ -0,0 +1,163 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:1a37f9f2ae10d161d9688fb6008ffa14e1631e5068cc3e9698008b9e8d40d575" + name = "cloud.google.com/go" + packages = ["compute/metadata"] + pruneopts = "" + revision = "457ea5c15ccf3b87db582c450e80101989da35f7" + version = "v0.40.0" + +[[projects]] + digest = "1:968d8903d598e3fae738325d3410f33f07ea6a2b9ee5591e9c262ee37df6845a" + name = "github.com/go-errors/errors" + packages = ["."] + pruneopts = "" + revision = "a6af135bd4e28680facf08a3d206b454abc877a4" + version = "v1.0.1" + +[[projects]] + digest = "1:529d738b7976c3848cae5cf3a8036440166835e389c1f617af701eeb12a0518d" + name = "github.com/golang/protobuf" + packages = ["proto"] + pruneopts = "" + revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" + version = "v1.3.1" + +[[projects]] + branch = "master" + digest = "1:cae59d7b8243c671c9f544965522ba35c0fec48ee80adb9f1400cd2f33abbbec" + name = "github.com/mailru/easyjson" + packages = [ + ".", + "buffer", + "jlexer", + "jwriter", + ] + pruneopts = "" + revision = "1ea4449da9834f4d333f1cc461c374aea217d249" + +[[projects]] + digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:8d4bbd8ab012efc77ab6b97286f2aff262bcdeac9803bb57d75cf7d0a5e6a877" + name = "github.com/viant/assertly" + packages = ["."] + pruneopts = "" + revision = "04f45e0aeb6f3455884877b047a97bcc95dc9493" + version = "v0.4.8" + +[[projects]] + digest = "1:5913451bc2d274673c0716efe226a137625740cd9380641f4d8300ff4f2d82a0" + name = "github.com/viant/toolbox" + packages = [ + ".", + "cred", + "data", + "storage", + "url", + ] + pruneopts = "" + revision = "1be8e4d172138324f40d55ea61a2aeab0c5ce864" + version = "v0.24.0" + +[[projects]] + branch = "master" + digest = "1:9d150270ca2c3356f2224a0878daa1652e4d0b25b345f18b4f6e156cc4b8ec5e" + name = "golang.org/x/crypto" + packages = [ + "blowfish", + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "internal/chacha20", + "internal/subtle", + "poly1305", + "ssh", + ] + pruneopts = "" + revision = "f99c8df09eb5bff426315721bfa5f16a99cad32c" + +[[projects]] + branch = "master" + digest = "1:5a56f211e7c12a65c5585c629457a2fb91d8719844ee8fab92727ea8adb5721c" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "websocket", + ] + pruneopts = "" + revision = "461777fb6f67e8cb9d70cda16573678d085a74cf" + +[[projects]] + branch = "master" + digest = "1:01bdbbc604dcd5afb6f66a717f69ad45e9643c72d5bc11678d44ffa5c50f9e42" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "" + revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" + +[[projects]] + branch = "master" + digest = "1:8ddb956f67d4c176abbbc42b7514aaeaf9ea30daa24e27d2cf30ad82f9334a2c" + name = "golang.org/x/sys" + packages = ["cpu"] + pruneopts = "" + revision = "1e42afee0f762ed3d76e6dd942e4181855fd1849" + +[[projects]] + digest = "1:47f391ee443f578f01168347818cb234ed819521e49e4d2c8dd2fb80d48ee41a" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "" + revision = "b2f4a3cf3c67576a2ee09e1fe62656a5086ce880" + version = "v1.6.1" + +[[projects]] + digest = 
"1:cedccf16b71e86db87a24f8d4c70b0a855872eb967cb906a66b95de56aefbd0d" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/go-errors/errors", + "github.com/mailru/easyjson", + "github.com/mailru/easyjson/jlexer", + "github.com/mailru/easyjson/jwriter", + "github.com/viant/assertly", + "github.com/viant/toolbox", + "github.com/viant/toolbox/url", + "golang.org/x/net/websocket", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/francoispqt/gojay/Gopkg.toml b/vendor/github.com/francoispqt/gojay/Gopkg.toml new file mode 100644 index 0000000..fa60792 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/Gopkg.toml @@ -0,0 +1,23 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +ignored = ["github.com/francoispqt/benchmarks*","github.com/stretchr/testify*","github.com/stretchr/testify","github.com/json-iterator/go","github.com/buger/jsonparser"] diff --git a/vendor/github.com/francoispqt/gojay/LICENSE b/vendor/github.com/francoispqt/gojay/LICENSE new file mode 100644 index 0000000..df21596 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 gojay + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/francoispqt/gojay/Makefile b/vendor/github.com/francoispqt/gojay/Makefile new file mode 100644 index 0000000..ce95723 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/Makefile @@ -0,0 +1,11 @@ +.PHONY: test +test: + go test -race -run=^Test -v + +.PHONY: cover +cover: + go test -coverprofile=coverage.out -covermode=atomic + +.PHONY: coverhtml +coverhtml: + go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/francoispqt/gojay/README.md b/vendor/github.com/francoispqt/gojay/README.md new file mode 100644 index 0000000..b2abd29 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/README.md @@ -0,0 +1,855 @@ +[![Build Status](https://travis-ci.org/francoispqt/gojay.svg?branch=master)](https://travis-ci.org/francoispqt/gojay) +[![codecov](https://codecov.io/gh/francoispqt/gojay/branch/master/graph/badge.svg)](https://codecov.io/gh/francoispqt/gojay) +[![Go Report Card](https://goreportcard.com/badge/github.com/francoispqt/gojay)](https://goreportcard.com/report/github.com/francoispqt/gojay) +[![Go doc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square +)](https://godoc.org/github.com/francoispqt/gojay) +![MIT License](https://img.shields.io/badge/license-mit-blue.svg?style=flat-square) +[![Sourcegraph](https://sourcegraph.com/github.com/francoispqt/gojay/-/badge.svg)](https://sourcegraph.com/github.com/francoispqt/gojay) +![stability-stable](https://img.shields.io/badge/stability-stable-green.svg) + +# GoJay + + + +GoJay is a performant JSON encoder/decoder for Golang (currently the most performant, [see benchmarks](#benchmark-results)). + +It has a simple API and doesn't use reflection. It relies on small interfaces to decode/encode structures and slices. + +Gojay also comes with powerful stream decoding features and an even faster [Unsafe](#unsafe-api) API. + +There is also a [code generation tool](https://github.com/francoispqt/gojay/tree/master/gojay) to make usage easier and faster. + +# Why another JSON parser? + +I looked at other fast decoder/encoder and realised it was mostly hardly readable static code generation or a lot of reflection, poor streaming features, and not so fast in the end. + +Also, I wanted to build a decoder that could consume an io.Reader of line or comma delimited JSON, in a JIT way. To consume a flow of JSON objects from a TCP connection for example or from a standard output. Same way I wanted to build an encoder that could encode a flow of data to a io.Writer. + +This is how GoJay aims to be a very fast, JIT stream parser with 0 reflection, low allocation with a friendly API. 
+ +# Get started + +```bash +go get github.com/francoispqt/gojay +``` + +* [Encoder](#encoding) +* [Decoder](#decoding) +* [Stream API](#stream-api) +* [Code Generation](https://github.com/francoispqt/gojay/tree/master/gojay) + +## Decoding + +Decoding is done through two different API similar to standard `encoding/json`: +* [Unmarshal](#unmarshal-api) +* [Decode](#decode-api) + + +Example of basic stucture decoding with Unmarshal: +```go +import "github.com/francoispqt/gojay" + +type user struct { + id int + name string + email string +} +// implement gojay.UnmarshalerJSONObject +func (u *user) UnmarshalJSONObject(dec *gojay.Decoder, key string) error { + switch key { + case "id": + return dec.Int(&u.id) + case "name": + return dec.String(&u.name) + case "email": + return dec.String(&u.email) + } + return nil +} +func (u *user) NKeys() int { + return 3 +} + +func main() { + u := &user{} + d := []byte(`{"id":1,"name":"gojay","email":"gojay@email.com"}`) + err := gojay.UnmarshalJSONObject(d, u) + if err != nil { + log.Fatal(err) + } +} +``` + +with Decode: +```go +func main() { + u := &user{} + dec := gojay.NewDecoder(bytes.NewReader([]byte(`{"id":1,"name":"gojay","email":"gojay@email.com"}`))) + err := dec.DecodeObject(d, u) + if err != nil { + log.Fatal(err) + } +} +``` + +### Unmarshal API + +Unmarshal API decodes a `[]byte` to a given pointer with a single function. + +Behind the doors, Unmarshal API borrows a `*gojay.Decoder` resets its settings and decodes the data to the given pointer and releases the `*gojay.Decoder` to the pool when it finishes, whether it encounters an error or not. + +If it cannot find the right Decoding strategy for the type of the given pointer, it returns an `InvalidUnmarshalError`. You can test the error returned by doing `if ok := err.(InvalidUnmarshalError); ok {}`. + +Unmarshal API comes with three functions: +* Unmarshal +```go +func Unmarshal(data []byte, v interface{}) error +``` + +* UnmarshalJSONObject +```go +func UnmarshalJSONObject(data []byte, v gojay.UnmarshalerJSONObject) error +``` + +* UnmarshalJSONArray +```go +func UnmarshalJSONArray(data []byte, v gojay.UnmarshalerJSONArray) error +``` + + +### Decode API + +Decode API decodes a `[]byte` to a given pointer by creating or borrowing a `*gojay.Decoder` with an `io.Reader` and calling `Decode` methods. + +__Getting a *gojay.Decoder or Borrowing__ + +You can either get a fresh `*gojay.Decoder` calling `dec := gojay.NewDecoder(io.Reader)` or borrow one from the pool by calling `dec := gojay.BorrowDecoder(io.Reader)`. + +After using a decoder, you can release it by calling `dec.Release()`. Beware, if you reuse the decoder after releasing it, it will panic with an error of type `InvalidUsagePooledDecoderError`. If you want to fully benefit from the pooling, you must release your decoders after using. 
+ +Example getting a fresh an releasing: +```go +str := "" +dec := gojay.NewDecoder(strings.NewReader(`"test"`)) +defer dec.Release() +if err := dec.Decode(&str); err != nil { + log.Fatal(err) +} +``` +Example borrowing a decoder and releasing: +```go +str := "" +dec := gojay.BorrowDecoder(strings.NewReader(`"test"`)) +defer dec.Release() +if err := dec.Decode(&str); err != nil { + log.Fatal(err) +} +``` + +`*gojay.Decoder` has multiple methods to decode to specific types: +* Decode +```go +func (dec *gojay.Decoder) Decode(v interface{}) error +``` +* DecodeObject +```go +func (dec *gojay.Decoder) DecodeObject(v gojay.UnmarshalerJSONObject) error +``` +* DecodeArray +```go +func (dec *gojay.Decoder) DecodeArray(v gojay.UnmarshalerJSONArray) error +``` +* DecodeInt +```go +func (dec *gojay.Decoder) DecodeInt(v *int) error +``` +* DecodeBool +```go +func (dec *gojay.Decoder) DecodeBool(v *bool) error +``` +* DecodeString +```go +func (dec *gojay.Decoder) DecodeString(v *string) error +``` + +All DecodeXxx methods are used to decode top level JSON values. If you are decoding keys or items of a JSON object or array, don't use the Decode methods. + +Example: +```go +reader := strings.NewReader(`"John Doe"`) +dec := NewDecoder(reader) + +var str string +err := dec.DecodeString(&str) +if err != nil { + log.Fatal(err) +} + +fmt.Println(str) // John Doe +``` + +### Structs and Maps +#### UnmarshalerJSONObject Interface + +To unmarshal a JSON object to a structure, the structure must implement the `UnmarshalerJSONObject` interface: +```go +type UnmarshalerJSONObject interface { + UnmarshalJSONObject(*gojay.Decoder, string) error + NKeys() int +} +``` +`UnmarshalJSONObject` method takes two arguments, the first one is a pointer to the Decoder (*gojay.Decoder) and the second one is the string value of the current key being parsed. If the JSON data is not an object, the UnmarshalJSONObject method will never be called. + +`NKeys` method must return the number of keys to Unmarshal in the JSON object or 0. If zero is returned, all keys will be parsed. + +Example of implementation for a struct: +```go +type user struct { + id int + name string + email string +} +// implement UnmarshalerJSONObject +func (u *user) UnmarshalJSONObject(dec *gojay.Decoder, key string) error { + switch key { + case "id": + return dec.Int(&u.id) + case "name": + return dec.String(&u.name) + case "email": + return dec.String(&u.email) + } + return nil +} +func (u *user) NKeys() int { + return 3 +} +``` + +Example of implementation for a `map[string]string`: +```go +// define our custom map type implementing UnmarshalerJSONObject +type message map[string]string + +// Implementing Unmarshaler +func (m message) UnmarshalJSONObject(dec *gojay.Decoder, k string) error { + str := "" + err := dec.String(&str) + if err != nil { + return err + } + m[k] = str + return nil +} + +// we return 0, it tells the Decoder to decode all keys +func (m message) NKeys() int { + return 0 +} +``` + +### Arrays, Slices and Channels + +To unmarshal a JSON object to a slice an array or a channel, it must implement the UnmarshalerJSONArray interface: +```go +type UnmarshalerJSONArray interface { + UnmarshalJSONArray(*gojay.Decoder) error +} +``` +UnmarshalJSONArray method takes one argument, a pointer to the Decoder (*gojay.Decoder). If the JSON data is not an array, the Unmarshal method will never be called. 
+ +Example of implementation with a slice: +```go +type testSlice []string +// implement UnmarshalerJSONArray +func (t *testSlice) UnmarshalJSONArray(dec *gojay.Decoder) error { + str := "" + if err := dec.String(&str); err != nil { + return err + } + *t = append(*t, str) + return nil +} + +func main() { + dec := gojay.BorrowDecoder(strings.NewReader(`["Tom", "Jim"]`)) + var slice testSlice + err := dec.DecodeArray(&slice) + if err != nil { + log.Fatal(err) + } + fmt.Println(slice) // [Tom Jim] + dec.Release() +} +``` + +Example of implementation with a channel: +```go +type testChannel chan string +// implement UnmarshalerJSONArray +func (c testChannel) UnmarshalJSONArray(dec *gojay.Decoder) error { + str := "" + if err := dec.String(&str); err != nil { + return err + } + c <- str + return nil +} + +func main() { + dec := gojay.BorrowDecoder(strings.NewReader(`["Tom", "Jim"]`)) + c := make(testChannel, 2) + err := dec.DecodeArray(c) + if err != nil { + log.Fatal(err) + } + for i := 0; i < 2; i++ { + fmt.Println(<-c) + } + close(c) + dec.Release() +} +``` + +Example of implementation with an array: +```go +type testArray [3]string +// implement UnmarshalerJSONArray +func (a *testArray) UnmarshalJSONArray(dec *Decoder) error { + var str string + if err := dec.String(&str); err != nil { + return err + } + a[dec.Index()] = str + return nil +} + +func main() { + dec := gojay.BorrowDecoder(strings.NewReader(`["Tom", "Jim", "Bob"]`)) + var a testArray + err := dec.DecodeArray(&a) + fmt.Println(a) // [Tom Jim Bob] + dec.Release() +} +``` + +### Other types +To decode other types (string, int, int32, int64, uint32, uint64, float, booleans), you don't need to implement any interface. + +Example of encoding strings: +```go +func main() { + json := []byte(`"Jay"`) + var v string + err := gojay.Unmarshal(json, &v) + if err != nil { + log.Fatal(err) + } + fmt.Println(v) // Jay +} +``` + +### Decode values methods +When decoding a JSON object of a JSON array using `UnmarshalerJSONObject` or `UnmarshalerJSONArray` interface, the `gojay.Decoder` provides dozens of methods to Decode multiple types. + +Non exhaustive list of methods available (to see all methods, check the godoc): +```go +dec.Int +dec.Int8 +dec.Int16 +dec.Int32 +dec.Int64 +dec.Uint8 +dec.Uint16 +dec.Uint32 +dec.Uint64 +dec.String +dec.Time +dec.Bool +dec.SQLNullString +dec.SQLNullInt64 +``` + + +## Encoding + +Encoding is done through two different API similar to standard `encoding/json`: +* [Marshal](#marshal-api) +* [Encode](#encode-api) + +Example of basic structure encoding with Marshal: +```go +import "github.com/francoispqt/gojay" + +type user struct { + id int + name string + email string +} + +// implement MarshalerJSONObject +func (u *user) MarshalJSONObject(enc *gojay.Encoder) { + enc.IntKey("id", u.id) + enc.StringKey("name", u.name) + enc.StringKey("email", u.email) +} +func (u *user) IsNil() bool { + return u == nil +} + +func main() { + u := &user{1, "gojay", "gojay@email.com"} + b, err := gojay.MarshalJSONObject(u) + if err != nil { + log.Fatal(err) + } + fmt.Println(string(b)) // {"id":1,"name":"gojay","email":"gojay@email.com"} +} +``` + +with Encode: +```go +func main() { + u := &user{1, "gojay", "gojay@email.com"} + b := strings.Builder{} + enc := gojay.NewEncoder(&b) + if err := enc.Encode(u); err != nil { + log.Fatal(err) + } + fmt.Println(b.String()) // {"id":1,"name":"gojay","email":"gojay@email.com"} +} +``` + +### Marshal API + +Marshal API encodes a value to a JSON `[]byte` with a single function. 
+ +Behind the doors, Marshal API borrows a `*gojay.Encoder` resets its settings and encodes the data to an internal byte buffer and releases the `*gojay.Encoder` to the pool when it finishes, whether it encounters an error or not. + +If it cannot find the right Encoding strategy for the type of the given value, it returns an `InvalidMarshalError`. You can test the error returned by doing `if ok := err.(InvalidMarshalError); ok {}`. + +Marshal API comes with three functions: +* Marshal +```go +func Marshal(v interface{}) ([]byte, error) +``` + +* MarshalJSONObject +```go +func MarshalJSONObject(v gojay.MarshalerJSONObject) ([]byte, error) +``` + +* MarshalJSONArray +```go +func MarshalJSONArray(v gojay.MarshalerJSONArray) ([]byte, error) +``` + +### Encode API + +Encode API decodes a value to JSON by creating or borrowing a `*gojay.Encoder` sending it to an `io.Writer` and calling `Encode` methods. + +__Getting a *gojay.Encoder or Borrowing__ + +You can either get a fresh `*gojay.Encoder` calling `enc := gojay.NewEncoder(io.Writer)` or borrow one from the pool by calling `enc := gojay.BorrowEncoder(io.Writer)`. + +After using an encoder, you can release it by calling `enc.Release()`. Beware, if you reuse the encoder after releasing it, it will panic with an error of type `InvalidUsagePooledEncoderError`. If you want to fully benefit from the pooling, you must release your encoders after using. + +Example getting a fresh encoder an releasing: +```go +str := "test" +b := strings.Builder{} +enc := gojay.NewEncoder(&b) +defer enc.Release() +if err := enc.Encode(str); err != nil { + log.Fatal(err) +} +``` +Example borrowing an encoder and releasing: +```go +str := "test" +b := strings.Builder{} +enc := gojay.BorrowEncoder(b) +defer enc.Release() +if err := enc.Encode(str); err != nil { + log.Fatal(err) +} +``` + +`*gojay.Encoder` has multiple methods to encoder specific types to JSON: +* Encode +```go +func (enc *gojay.Encoder) Encode(v interface{}) error +``` +* EncodeObject +```go +func (enc *gojay.Encoder) EncodeObject(v gojay.MarshalerJSONObject) error +``` +* EncodeArray +```go +func (enc *gojay.Encoder) EncodeArray(v gojay.MarshalerJSONArray) error +``` +* EncodeInt +```go +func (enc *gojay.Encoder) EncodeInt(n int) error +``` +* EncodeInt64 +```go +func (enc *gojay.Encoder) EncodeInt64(n int64) error +``` +* EncodeFloat +```go +func (enc *gojay.Encoder) EncodeFloat(n float64) error +``` +* EncodeBool +```go +func (enc *gojay.Encoder) EncodeBool(v bool) error +``` +* EncodeString +```go +func (enc *gojay.Encoder) EncodeString(s string) error +``` + +### Structs and Maps + +To encode a structure, the structure must implement the MarshalerJSONObject interface: +```go +type MarshalerJSONObject interface { + MarshalJSONObject(enc *gojay.Encoder) + IsNil() bool +} +``` +`MarshalJSONObject` method takes one argument, a pointer to the Encoder (*gojay.Encoder). The method must add all the keys in the JSON Object by calling Decoder's methods. + +IsNil method returns a boolean indicating if the interface underlying value is nil or not. It is used to safely ensure that the underlying value is not nil without using Reflection. 
+ +Example of implementation for a struct: +```go +type user struct { + id int + name string + email string +} + +// implement MarshalerJSONObject +func (u *user) MarshalJSONObject(enc *gojay.Encoder) { + enc.IntKey("id", u.id) + enc.StringKey("name", u.name) + enc.StringKey("email", u.email) +} +func (u *user) IsNil() bool { + return u == nil +} +``` + +Example of implementation for a `map[string]string`: +```go +// define our custom map type implementing MarshalerJSONObject +type message map[string]string + +// Implementing Marshaler +func (m message) MarshalJSONObject(enc *gojay.Encoder) { + for k, v := range m { + enc.StringKey(k, v) + } +} + +func (m message) IsNil() bool { + return m == nil +} +``` + +### Arrays and Slices +To encode an array or a slice, the slice/array must implement the MarshalerJSONArray interface: +```go +type MarshalerJSONArray interface { + MarshalJSONArray(enc *gojay.Encoder) + IsNil() bool +} +``` +`MarshalJSONArray` method takes one argument, a pointer to the Encoder (*gojay.Encoder). The method must add all element in the JSON Array by calling Decoder's methods. + +`IsNil` method returns a boolean indicating if the interface underlying value is nil(empty) or not. It is used to safely ensure that the underlying value is not nil without using Reflection and also to in `OmitEmpty` feature. + +Example of implementation: +```go +type users []*user +// implement MarshalerJSONArray +func (u *users) MarshalJSONArray(enc *gojay.Encoder) { + for _, e := range u { + enc.Object(e) + } +} +func (u *users) IsNil() bool { + return len(u) == 0 +} +``` + +### Other types +To encode other types (string, int, float, booleans), you don't need to implement any interface. + +Example of encoding strings: +```go +func main() { + name := "Jay" + b, err := gojay.Marshal(name) + if err != nil { + log.Fatal(err) + } + fmt.Println(string(b)) // "Jay" +} +``` + +# Stream API + +### Stream Decoding +GoJay ships with a powerful stream decoder. + +It allows to read continuously from an io.Reader stream and do JIT decoding writing unmarshalled JSON to a channel to allow async consuming. + +When using the Stream API, the Decoder implements context.Context to provide graceful cancellation. + +To decode a stream of JSON, you must call `gojay.Stream.DecodeStream` and pass it a `UnmarshalerStream` implementation. + +```go +type UnmarshalerStream interface { + UnmarshalStream(*StreamDecoder) error +} +``` + +Example of implementation of stream reading from a WebSocket connection: +```go +// implement UnmarshalerStream +type ChannelStream chan *user + +func (c ChannelStream) UnmarshalStream(dec *gojay.StreamDecoder) error { + u := &user{} + if err := dec.Object(u); err != nil { + return err + } + c <- u + return nil +} + +func main() { + // get our websocket connection + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + // create our channel which will receive our objects + streamChan := ChannelStream(make(chan *user)) + // borrow a decoder + dec := gojay.Stream.BorrowDecoder(ws) + // start decoding, it will block until a JSON message is decoded from the WebSocket + // or until Done channel is closed + go dec.DecodeStream(streamChan) + for { + select { + case v := <-streamChan: + // Got something from my websocket! 
+ log.Println(v) + case <-dec.Done(): + log.Println("finished reading from WebSocket") + os.Exit(0) + } + } +} +``` + +### Stream Encoding +GoJay ships with a powerful stream encoder part of the Stream API. + +It allows to write continuously to an io.Writer and do JIT encoding of data fed to a channel to allow async consuming. You can set multiple consumers on the channel to be as performant as possible. Consumers are non blocking and are scheduled individually in their own go routine. + +When using the Stream API, the Encoder implements context.Context to provide graceful cancellation. + +To encode a stream of data, you must call `EncodeStream` and pass it a `MarshalerStream` implementation. + +```go +type MarshalerStream interface { + MarshalStream(enc *gojay.StreamEncoder) +} +``` + +Example of implementation of stream writing to a WebSocket: +```go +// Our structure which will be pushed to our stream +type user struct { + id int + name string + email string +} + +func (u *user) MarshalJSONObject(enc *gojay.Encoder) { + enc.IntKey("id", u.id) + enc.StringKey("name", u.name) + enc.StringKey("email", u.email) +} +func (u *user) IsNil() bool { + return u == nil +} + +// Our MarshalerStream implementation +type StreamChan chan *user + +func (s StreamChan) MarshalStream(enc *gojay.StreamEncoder) { + select { + case <-enc.Done(): + return + case o := <-s: + enc.Object(o) + } +} + +// Our main function +func main() { + // get our websocket connection + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + // we borrow an encoder set stdout as the writer, + // set the number of consumer to 10 + // and tell the encoder to separate each encoded element + // added to the channel by a new line character + enc := gojay.Stream.BorrowEncoder(ws).NConsumer(10).LineDelimited() + // instantiate our MarshalerStream + s := StreamChan(make(chan *user)) + // start the stream encoder + // will block its goroutine until enc.Cancel(error) is called + // or until something is written to the channel + go enc.EncodeStream(s) + // write to our MarshalerStream + for i := 0; i < 1000; i++ { + s <- &user{i, "username", "user@email.com"} + } + // Wait + <-enc.Done() +} +``` + +# Unsafe API + +Unsafe API has the same functions than the regular API, it only has `Unmarshal API` for now. It is unsafe because it makes assumptions on the quality of the given JSON. + +If you are not sure if your JSON is valid, don't use the Unsafe API. + +Also, the `Unsafe` API does not copy the buffer when using Unmarshal API, which, in case of string decoding, can lead to data corruption if a byte buffer is reused. Using the `Decode` API makes `Unsafe` API safer as the io.Reader relies on `copy` builtin method and `Decoder` will have its own internal buffer :) + +Access the `Unsafe` API this way: +```go +gojay.Unsafe.Unmarshal(b, v) +``` + + +# Benchmarks + +Benchmarks encode and decode three different data based on size (small, medium, large). 
+ +To run benchmark for decoder: +```bash +cd $GOPATH/src/github.com/francoispqt/gojay/benchmarks/decoder && make bench +``` + +To run benchmark for encoder: +```bash +cd $GOPATH/src/github.com/francoispqt/gojay/benchmarks/encoder && make bench +``` + +# Benchmark Results +## Decode + + + +### Small Payload +[benchmark code is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/decoder/decoder_bench_small_test.go) + +[benchmark data is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/benchmarks_small.go) + +| | ns/op | bytes/op | allocs/op | +|-----------------|-----------|--------------|-----------| +| Std Library | 2547 | 496 | 4 | +| JsonIter | 2046 | 312 | 12 | +| JsonParser | 1408 | 0 | 0 | +| EasyJson | 929 | 240 | 2 | +| **GoJay** | **807** | **256** | **2** | +| **GoJay-unsafe**| **712** | **112** | **1** | + +### Medium Payload +[benchmark code is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/decoder/decoder_bench_medium_test.go) + +[benchmark data is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/benchmarks_medium.go) + +| | ns/op | bytes/op | allocs/op | +|-----------------|-----------|----------|-----------| +| Std Library | 30148 | 2152 | 496 | +| JsonIter | 16309 | 2976 | 80 | +| JsonParser | 7793 | 0 | 0 | +| EasyJson | 7957 | 232 | 6 | +| **GoJay** | **4984** | **2448** | **8** | +| **GoJay-unsafe**| **4809** | **144** | **7** | + +### Large Payload +[benchmark code is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/decoder/decoder_bench_large_test.go) + +[benchmark data is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/benchmarks_large.go) + +| | ns/op | bytes/op | allocs/op | +|-----------------|-----------|-------------|-----------| +| JsonIter | 210078 | 41712 | 1136 | +| EasyJson | 106626 | 160 | 2 | +| JsonParser | 66813 | 0 | 0 | +| **GoJay** | **52153** | **31241** | **77** | +| **GoJay-unsafe**| **48277** | **2561** | **76** | + +## Encode + + + +### Small Struct +[benchmark code is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/encoder/encoder_bench_small_test.go) + +[benchmark data is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/benchmarks_small.go) + +| | ns/op | bytes/op | allocs/op | +|----------------|----------|--------------|-----------| +| Std Library | 1280 | 464 | 3 | +| EasyJson | 871 | 944 | 6 | +| JsonIter | 866 | 272 | 3 | +| **GoJay** | **543** | **112** | **1** | +| **GoJay-func** | **347** | **0** | **0** | + +### Medium Struct +[benchmark code is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/encoder/encoder_bench_medium_test.go) + +[benchmark data is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/benchmarks_medium.go) + +| | ns/op | bytes/op | allocs/op | +|-------------|----------|--------------|-----------| +| Std Library | 5006 | 1496 | 25 | +| JsonIter | 2232 | 1544 | 20 | +| EasyJson | 1997 | 1544 | 19 | +| **GoJay** | **1522** | **312** | **14** | + +### Large Struct +[benchmark code is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/encoder/encoder_bench_large_test.go) + +[benchmark data is here](https://github.com/francoispqt/gojay/blob/master/benchmarks/benchmarks_large.go) + +| | ns/op | bytes/op | allocs/op | +|-------------|-----------|--------------|-----------| +| Std Library | 66441 | 20576 | 332 | +| JsonIter | 35247 | 20255 | 328 | +| EasyJson | 32053 | 15474 | 327 | +| **GoJay** | **27847** | **9802** | **318** | + +# 
Contributing + +Contributions are welcome :) + +If you encounter issues please report it in Github and/or send an email at [francois@parquet.ninja](mailto:francois@parquet.ninja) + diff --git a/vendor/github.com/francoispqt/gojay/decode.go b/vendor/github.com/francoispqt/gojay/decode.go new file mode 100644 index 0000000..fbd07f7 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode.go @@ -0,0 +1,386 @@ +package gojay + +import ( + "fmt" + "io" +) + +// UnmarshalJSONArray parses the JSON-encoded data and stores the result in the value pointed to by v. +// +// v must implement UnmarshalerJSONArray. +// +// If a JSON value is not appropriate for a given target type, or if a JSON number +// overflows the target type, UnmarshalJSONArray skips that field and completes the unmarshaling as best it can. +func UnmarshalJSONArray(data []byte, v UnmarshalerJSONArray) error { + dec := borrowDecoder(nil, 0) + defer dec.Release() + dec.data = make([]byte, len(data)) + copy(dec.data, data) + dec.length = len(data) + _, err := dec.decodeArray(v) + if err != nil { + return err + } + if dec.err != nil { + return dec.err + } + return nil +} + +// UnmarshalJSONObject parses the JSON-encoded data and stores the result in the value pointed to by v. +// +// v must implement UnmarshalerJSONObject. +// +// If a JSON value is not appropriate for a given target type, or if a JSON number +// overflows the target type, UnmarshalJSONObject skips that field and completes the unmarshaling as best it can. +func UnmarshalJSONObject(data []byte, v UnmarshalerJSONObject) error { + dec := borrowDecoder(nil, 0) + defer dec.Release() + dec.data = make([]byte, len(data)) + copy(dec.data, data) + dec.length = len(data) + _, err := dec.decodeObject(v) + if err != nil { + return err + } + if dec.err != nil { + return dec.err + } + return nil +} + +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// If v is nil, not an implementation of UnmarshalerJSONObject or UnmarshalerJSONArray or not one of the following types: +// *string, **string, *int, **int, *int8, **int8, *int16, **int16, *int32, **int32, *int64, **int64, *uint8, **uint8, *uint16, **uint16, +// *uint32, **uint32, *uint64, **uint64, *float64, **float64, *float32, **float32, *bool, **bool +// Unmarshal returns an InvalidUnmarshalError. +// +// +// If a JSON value is not appropriate for a given target type, or if a JSON number +// overflows the target type, Unmarshal skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns an UnmarshalTypeError describing the earliest such error. +// In any case, it's not guaranteed that all the remaining fields following the problematic one will be unmarshaled into the target object. 
+func Unmarshal(data []byte, v interface{}) error { + var err error + var dec *Decoder + switch vt := v.(type) { + case *string: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeString(vt) + case **string: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeStringNull(vt) + case *int: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt(vt) + case **int: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeIntNull(vt) + case *int8: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt8(vt) + case **int8: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt8Null(vt) + case *int16: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt16(vt) + case **int16: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt16Null(vt) + case *int32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt32(vt) + case **int32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt32Null(vt) + case *int64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt64(vt) + case **int64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt64Null(vt) + case *uint8: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint8(vt) + case **uint8: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint8Null(vt) + case *uint16: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint16(vt) + case **uint16: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint16Null(vt) + case *uint32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint32(vt) + case **uint32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint32Null(vt) + case *uint64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint64(vt) + case **uint64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint64Null(vt) + case *float64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeFloat64(vt) + case **float64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeFloat64Null(vt) + case *float32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeFloat32(vt) + case **float32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeFloat32Null(vt) + case *bool: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeBool(vt) + case **bool: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeBoolNull(vt) + case UnmarshalerJSONObject: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = make([]byte, len(data)) + copy(dec.data, data) + _, err = dec.decodeObject(vt) + case UnmarshalerJSONArray: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = make([]byte, len(data)) + copy(dec.data, data) + _, err = 
dec.decodeArray(vt) + case *interface{}: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = make([]byte, len(data)) + copy(dec.data, data) + err = dec.decodeInterface(vt) + default: + return InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, vt)) + } + defer dec.Release() + if err != nil { + return err + } + return dec.err +} + +// UnmarshalerJSONObject is the interface to implement to decode a JSON Object. +type UnmarshalerJSONObject interface { + UnmarshalJSONObject(*Decoder, string) error + NKeys() int +} + +// UnmarshalerJSONArray is the interface to implement to decode a JSON Array. +type UnmarshalerJSONArray interface { + UnmarshalJSONArray(*Decoder) error +} + +// A Decoder reads and decodes JSON values from an input stream. +type Decoder struct { + r io.Reader + data []byte + err error + isPooled byte + called byte + child byte + cursor int + length int + keysDone int + arrayIndex int +} + +// Decode reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +// The differences between Decode and Unmarshal are: +// - Decode reads from an io.Reader in the Decoder, whereas Unmarshal reads from a []byte +// - Decode leaves to the user the option of borrowing and releasing a Decoder, whereas Unmarshal internally always borrows a Decoder and releases it when the unmarshaling is completed +func (dec *Decoder) Decode(v interface{}) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + var err error + switch vt := v.(type) { + case *string: + err = dec.decodeString(vt) + case **string: + err = dec.decodeStringNull(vt) + case *int: + err = dec.decodeInt(vt) + case **int: + err = dec.decodeIntNull(vt) + case *int8: + err = dec.decodeInt8(vt) + case **int8: + err = dec.decodeInt8Null(vt) + case *int16: + err = dec.decodeInt16(vt) + case **int16: + err = dec.decodeInt16Null(vt) + case *int32: + err = dec.decodeInt32(vt) + case **int32: + err = dec.decodeInt32Null(vt) + case *int64: + err = dec.decodeInt64(vt) + case **int64: + err = dec.decodeInt64Null(vt) + case *uint8: + err = dec.decodeUint8(vt) + case **uint8: + err = dec.decodeUint8Null(vt) + case *uint16: + err = dec.decodeUint16(vt) + case **uint16: + err = dec.decodeUint16Null(vt) + case *uint32: + err = dec.decodeUint32(vt) + case **uint32: + err = dec.decodeUint32Null(vt) + case *uint64: + err = dec.decodeUint64(vt) + case **uint64: + err = dec.decodeUint64Null(vt) + case *float64: + err = dec.decodeFloat64(vt) + case **float64: + err = dec.decodeFloat64Null(vt) + case *float32: + err = dec.decodeFloat32(vt) + case **float32: + err = dec.decodeFloat32Null(vt) + case *bool: + err = dec.decodeBool(vt) + case **bool: + err = dec.decodeBoolNull(vt) + case UnmarshalerJSONObject: + _, err = dec.decodeObject(vt) + case UnmarshalerJSONArray: + _, err = dec.decodeArray(vt) + case *EmbeddedJSON: + err = dec.decodeEmbeddedJSON(vt) + case *interface{}: + err = dec.decodeInterface(vt) + default: + return InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, vt)) + } + if err != nil { + return err + } + return dec.err +} + +// Non exported + +func isDigit(b byte) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + default: + return false + } +} + +func (dec *Decoder) read() bool { + if dec.r != nil { + // if we reach the end, double the buffer to ensure there's 
always more space + if len(dec.data) == dec.length { + nLen := dec.length * 2 + if nLen == 0 { + nLen = 512 + } + Buf := make([]byte, nLen, nLen) + copy(Buf, dec.data) + dec.data = Buf + } + var n int + var err error + for n == 0 { + n, err = dec.r.Read(dec.data[dec.length:]) + if err != nil { + if err != io.EOF { + dec.err = err + return false + } + if n == 0 { + return false + } + dec.length = dec.length + n + return true + } + } + dec.length = dec.length + n + return true + } + return false +} + +func (dec *Decoder) nextChar() byte { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + } + d := dec.data[dec.cursor] + return d + } + return 0 +} diff --git a/vendor/github.com/francoispqt/gojay/decode_array.go b/vendor/github.com/francoispqt/gojay/decode_array.go new file mode 100644 index 0000000..297f2ee --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_array.go @@ -0,0 +1,247 @@ +package gojay + +import "reflect" + +// DecodeArray reads the next JSON-encoded value from the decoder's input (io.Reader) +// and stores it in the value pointed to by v. +// +// v must implement UnmarshalerJSONArray. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *Decoder) DecodeArray(v UnmarshalerJSONArray) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + _, err := dec.decodeArray(v) + return err +} +func (dec *Decoder) decodeArray(arr UnmarshalerJSONArray) (int, error) { + // remember last array index in case of nested arrays + lastArrayIndex := dec.arrayIndex + dec.arrayIndex = 0 + defer func() { + dec.arrayIndex = lastArrayIndex + }() + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + case '[': + dec.cursor = dec.cursor + 1 + // array is open, char is not space start readings + for dec.nextChar() != 0 { + // closing array + if dec.data[dec.cursor] == ']' { + dec.cursor = dec.cursor + 1 + return dec.cursor, nil + } + // calling unmarshall function for each element of the slice + err := arr.UnmarshalJSONArray(dec) + if err != nil { + return 0, err + } + dec.arrayIndex++ + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) + case 'n': + // is null + dec.cursor++ + err := dec.assertNull() + if err != nil { + return 0, err + } + return dec.cursor, nil + case '{', '"', 'f', 't', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // can't unmarshall to struct + // we skip array and set Error + dec.err = dec.makeInvalidUnmarshalErr(arr) + err := dec.skipData() + if err != nil { + return 0, err + } + return dec.cursor, nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeArrayNull(v interface{}) (int, error) { + // remember last array index in case of nested arrays + lastArrayIndex := dec.arrayIndex + dec.arrayIndex = 0 + defer func() { + dec.arrayIndex = lastArrayIndex + }() + vv := reflect.ValueOf(v) + vvt := vv.Type() + if vvt.Kind() != reflect.Ptr || vvt.Elem().Kind() != reflect.Ptr { + dec.err = ErrUnmarshalPtrExpected + return 0, dec.err + } + // not an array not an error, but do not know what to do + // do not check syntax + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + case '[': + dec.cursor = 
dec.cursor + 1 + // create our new type + elt := vv.Elem() + n := reflect.New(elt.Type().Elem()) + var arr UnmarshalerJSONArray + var ok bool + if arr, ok = n.Interface().(UnmarshalerJSONArray); !ok { + dec.err = dec.makeInvalidUnmarshalErr((UnmarshalerJSONArray)(nil)) + return 0, dec.err + } + // array is open, char is not space start readings + for dec.nextChar() != 0 { + // closing array + if dec.data[dec.cursor] == ']' { + elt.Set(n) + dec.cursor = dec.cursor + 1 + return dec.cursor, nil + } + // calling unmarshall function for each element of the slice + err := arr.UnmarshalJSONArray(dec) + if err != nil { + return 0, err + } + dec.arrayIndex++ + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) + case 'n': + // is null + dec.cursor++ + err := dec.assertNull() + if err != nil { + return 0, err + } + return dec.cursor, nil + case '{', '"', 'f', 't', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // can't unmarshall to struct + // we skip array and set Error + dec.err = dec.makeInvalidUnmarshalErr((UnmarshalerJSONArray)(nil)) + err := dec.skipData() + if err != nil { + return 0, err + } + return dec.cursor, nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) skipArray() (int, error) { + var arraysOpen = 1 + var arraysClosed = 0 + // var stringOpen byte = 0 + for j := dec.cursor; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case ']': + arraysClosed++ + // everything is closed return + if arraysOpen == arraysClosed { + // add char to object data + return j + 1, nil + } + case '[': + arraysOpen++ + case '"': + j++ + var isInEscapeSeq bool + var isFirstQuote = true + for ; j < dec.length || dec.read(); j++ { + if dec.data[j] != '"' { + continue + } + if dec.data[j-1] != '\\' || (!isInEscapeSeq && !isFirstQuote) { + break + } else { + isInEscapeSeq = false + } + if isFirstQuote { + isFirstQuote = false + } + // loop backward and count how many anti slash found + // to see if string is effectively escaped + ct := 0 + for i := j - 1; i > 0; i-- { + if dec.data[i] != '\\' { + break + } + ct++ + } + // is pair number of slashes, quote is not escaped + if ct&1 == 0 { + break + } + isInEscapeSeq = true + } + default: + continue + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +// DecodeArrayFunc is a func type implementing UnmarshalerJSONArray. +// Use it to cast a `func(*Decoder) error` to Unmarshal an array on the fly. + +type DecodeArrayFunc func(*Decoder) error + +// UnmarshalJSONArray implements UnmarshalerJSONArray. +func (f DecodeArrayFunc) UnmarshalJSONArray(dec *Decoder) error { + return f(dec) +} + +// IsNil implements UnmarshalerJSONArray. +func (f DecodeArrayFunc) IsNil() bool { + return f == nil +} + +// Add Values functions + +// AddArray decodes the JSON value within an object or an array to a UnmarshalerJSONArray. +func (dec *Decoder) AddArray(v UnmarshalerJSONArray) error { + return dec.Array(v) +} + +// AddArrayNull decodes the JSON value within an object or an array to a UnmarshalerJSONArray. +func (dec *Decoder) AddArrayNull(v interface{}) error { + return dec.ArrayNull(v) +} + +// Array decodes the JSON value within an object or an array to a UnmarshalerJSONArray. +func (dec *Decoder) Array(v UnmarshalerJSONArray) error { + newCursor, err := dec.decodeArray(v) + if err != nil { + return err + } + dec.cursor = newCursor + dec.called |= 1 + return nil +} + +// ArrayNull decodes the JSON value within an object or an array to a UnmarshalerJSONArray. 
+// v should be a pointer to an UnmarshalerJSONArray, +// if `null` value is encountered in JSON, it will leave the value v untouched, +// else it will create a new instance of the UnmarshalerJSONArray behind v. +func (dec *Decoder) ArrayNull(v interface{}) error { + newCursor, err := dec.decodeArrayNull(v) + if err != nil { + return err + } + dec.cursor = newCursor + dec.called |= 1 + return nil +} + +// Index returns the index of an array being decoded. +func (dec *Decoder) Index() int { + return dec.arrayIndex +} diff --git a/vendor/github.com/francoispqt/gojay/decode_bool.go b/vendor/github.com/francoispqt/gojay/decode_bool.go new file mode 100644 index 0000000..1dc304b --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_bool.go @@ -0,0 +1,241 @@ +package gojay + +// DecodeBool reads the next JSON-encoded value from the decoder's input (io.Reader) +// and stores it in the boolean pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *Decoder) DecodeBool(v *bool) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeBool(v) +} +func (dec *Decoder) decodeBool(v *bool) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + case 't': + dec.cursor++ + err := dec.assertTrue() + if err != nil { + return err + } + *v = true + return nil + case 'f': + dec.cursor++ + err := dec.assertFalse() + if err != nil { + return err + } + *v = false + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + *v = false + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return nil +} +func (dec *Decoder) decodeBoolNull(v **bool) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + case 't': + dec.cursor++ + err := dec.assertTrue() + if err != nil { + return err + } + if *v == nil { + *v = new(bool) + } + **v = true + return nil + case 'f': + dec.cursor++ + err := dec.assertFalse() + if err != nil { + return err + } + if *v == nil { + *v = new(bool) + } + **v = false + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return nil +} + +func (dec *Decoder) assertTrue() error { + i := 0 + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch i { + case 0: + if dec.data[dec.cursor] != 'r' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 1: + if dec.data[dec.cursor] != 'u' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 2: + if dec.data[dec.cursor] != 'e' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 3: + switch dec.data[dec.cursor] { + case ' ', '\b', '\t', '\n', ',', ']', '}': + // dec.cursor-- + return nil + default: + return dec.raiseInvalidJSONErr(dec.cursor) + } + } + i++ + } + if i == 3 { + return nil + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) assertNull() error { + i := 0 + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch i { + case 0: + if dec.data[dec.cursor] != 'u' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 1: 
+ if dec.data[dec.cursor] != 'l' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 2: + if dec.data[dec.cursor] != 'l' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 3: + switch dec.data[dec.cursor] { + case ' ', '\t', '\n', ',', ']', '}': + // dec.cursor-- + return nil + default: + return dec.raiseInvalidJSONErr(dec.cursor) + } + } + i++ + } + if i == 3 { + return nil + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) assertFalse() error { + i := 0 + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch i { + case 0: + if dec.data[dec.cursor] != 'a' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 1: + if dec.data[dec.cursor] != 'l' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 2: + if dec.data[dec.cursor] != 's' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 3: + if dec.data[dec.cursor] != 'e' { + return dec.raiseInvalidJSONErr(dec.cursor) + } + case 4: + switch dec.data[dec.cursor] { + case ' ', '\t', '\n', ',', ']', '}': + // dec.cursor-- + return nil + default: + return dec.raiseInvalidJSONErr(dec.cursor) + } + } + i++ + } + if i == 4 { + return nil + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +// Add Values functions + +// AddBool decodes the JSON value within an object or an array to a *bool. +// If next key is neither null nor a JSON boolean, an InvalidUnmarshalError will be returned. +// If next key is null, bool will be false. +func (dec *Decoder) AddBool(v *bool) error { + return dec.Bool(v) +} + +// AddBoolNull decodes the JSON value within an object or an array to a *bool. +// If next key is neither null nor a JSON boolean, an InvalidUnmarshalError will be returned. +// If next key is null, bool will be false. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddBoolNull(v **bool) error { + return dec.BoolNull(v) +} + +// Bool decodes the JSON value within an object or an array to a *bool. +// If next key is neither null nor a JSON boolean, an InvalidUnmarshalError will be returned. +// If next key is null, bool will be false. +func (dec *Decoder) Bool(v *bool) error { + err := dec.decodeBool(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// BoolNull decodes the JSON value within an object or an array to a *bool. +// If next key is neither null nor a JSON boolean, an InvalidUnmarshalError will be returned. +// If next key is null, bool will be false. +func (dec *Decoder) BoolNull(v **bool) error { + err := dec.decodeBoolNull(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_embedded_json.go b/vendor/github.com/francoispqt/gojay/decode_embedded_json.go new file mode 100644 index 0000000..67fcc2e --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_embedded_json.go @@ -0,0 +1,85 @@ +package gojay + +// EmbeddedJSON is a raw encoded JSON value. +// It can be used to delay JSON decoding or precompute a JSON encoding. 
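A short sketch of how the `EmbeddedJSON` type documented above is typically used to defer decoding. The `message` type is hypothetical, and `dec.String` is assumed to be the string helper from decode_string.go (not part of this excerpt):

```go
package main

import "github.com/francoispqt/gojay"

// message is a hypothetical type that defers decoding of its "payload"
// field by capturing it as raw, still-encoded JSON bytes.
type message struct {
	kind    string
	payload gojay.EmbeddedJSON
}

func (m *message) UnmarshalJSONObject(dec *gojay.Decoder, key string) error {
	switch key {
	case "kind":
		return dec.String(&m.kind) // assumed helper from decode_string.go
	case "payload":
		// Keep the raw bytes of whatever JSON value follows the key.
		return dec.AddEmbeddedJSON(&m.payload)
	}
	return nil
}

func (m *message) NKeys() int { return 2 }

func main() {
	m := &message{}
	_ = gojay.UnmarshalJSONObject([]byte(`{"kind":"ping","payload":{"seq":7}}`), m)
	// m.payload now holds `{"seq":7}` and can be decoded later, once kind is known.
}
```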
+type EmbeddedJSON []byte + +func (dec *Decoder) decodeEmbeddedJSON(ej *EmbeddedJSON) error { + var err error + if ej == nil { + return InvalidUnmarshalError("Invalid nil pointer given") + } + var beginOfEmbeddedJSON int + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + // is null + case 'n': + beginOfEmbeddedJSON = dec.cursor + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + case 't': + beginOfEmbeddedJSON = dec.cursor + dec.cursor++ + err := dec.assertTrue() + if err != nil { + return err + } + // is false + case 'f': + beginOfEmbeddedJSON = dec.cursor + dec.cursor++ + err := dec.assertFalse() + if err != nil { + return err + } + // is an object + case '{': + beginOfEmbeddedJSON = dec.cursor + dec.cursor = dec.cursor + 1 + dec.cursor, err = dec.skipObject() + // is string + case '"': + beginOfEmbeddedJSON = dec.cursor + dec.cursor = dec.cursor + 1 + err = dec.skipString() // why no new dec.cursor in result? + // is array + case '[': + beginOfEmbeddedJSON = dec.cursor + dec.cursor = dec.cursor + 1 + dec.cursor, err = dec.skipArray() + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + beginOfEmbeddedJSON = dec.cursor + dec.cursor, err = dec.skipNumber() + } + break + } + if err == nil { + if dec.cursor-1 >= beginOfEmbeddedJSON { + *ej = append(*ej, dec.data[beginOfEmbeddedJSON:dec.cursor]...) + } + dec.called |= 1 + } + return err +} + +// AddEmbeddedJSON adds an EmbeddedsJSON to the value pointed by v. +// It can be used to delay JSON decoding or precompute a JSON encoding. +func (dec *Decoder) AddEmbeddedJSON(v *EmbeddedJSON) error { + return dec.EmbeddedJSON(v) +} + +// EmbeddedJSON adds an EmbeddedsJSON to the value pointed by v. +// It can be used to delay JSON decoding or precompute a JSON encoding. +func (dec *Decoder) EmbeddedJSON(v *EmbeddedJSON) error { + err := dec.decodeEmbeddedJSON(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_interface.go b/vendor/github.com/francoispqt/gojay/decode_interface.go new file mode 100644 index 0000000..015790d --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_interface.go @@ -0,0 +1,130 @@ +package gojay + +// TODO @afiune for now we are using the standard json unmarshaling but in +// the future it would be great to implement one here inside this repo +import "encoding/json" + +// DecodeInterface reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the value pointed to by i. +// +// i must be an interface poiter +func (dec *Decoder) DecodeInterface(i *interface{}) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + err := dec.decodeInterface(i) + return err +} + +func (dec *Decoder) decodeInterface(i *interface{}) error { + start, end, err := dec.getObject() + if err != nil { + dec.cursor = start + return err + } + + // if start & end are equal the object is a null, don't unmarshal + if start == end { + return nil + } + + object := dec.data[start:end] + if err = json.Unmarshal(object, i); err != nil { + return err + } + + dec.cursor = end + return nil +} + +// @afiune Maybe return the type as well? 
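Because `decodeInterface` above currently delegates to `encoding/json` (per the TODO note), decoding into an `interface{}` produces the standard-library shapes. A minimal sketch under that assumption:

```go
package main

import (
	"fmt"
	"log"

	"github.com/francoispqt/gojay"
)

func main() {
	var v interface{}
	if err := gojay.Unmarshal([]byte(`{"name":"gojay","tags":["fast","json"]}`), &v); err != nil {
		log.Fatal(err)
	}
	// With the encoding/json fallback, objects come back as
	// map[string]interface{} and arrays as []interface{}.
	obj := v.(map[string]interface{})
	fmt.Println(obj["name"], obj["tags"])
}
```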
+func (dec *Decoder) getObject() (start int, end int, err error) { + // start cursor + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + // is null + case 'n': + dec.cursor++ + err = dec.assertNull() + if err != nil { + return + } + // Set start & end to the same cursor to indicate the object + // is a null and should not be unmarshal + start = dec.cursor + end = dec.cursor + return + case 't': + start = dec.cursor + dec.cursor++ + err = dec.assertTrue() + if err != nil { + return + } + end = dec.cursor + dec.cursor++ + return + // is false + case 'f': + start = dec.cursor + dec.cursor++ + err = dec.assertFalse() + if err != nil { + return + } + end = dec.cursor + dec.cursor++ + return + // is an object + case '{': + start = dec.cursor + dec.cursor++ + end, err = dec.skipObject() + dec.cursor = end + return + // is string + case '"': + start = dec.cursor + dec.cursor++ + start, end, err = dec.getString() + start-- + dec.cursor = end + return + // is array + case '[': + start = dec.cursor + dec.cursor++ + end, err = dec.skipArray() + dec.cursor = end + return + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + start = dec.cursor + end, err = dec.skipNumber() + dec.cursor = end + return + default: + err = dec.raiseInvalidJSONErr(dec.cursor) + return + } + } + err = dec.raiseInvalidJSONErr(dec.cursor) + return +} + +// Add Values functions + +// AddInterface decodes the JSON value within an object or an array to a interface{}. +func (dec *Decoder) AddInterface(v *interface{}) error { + return dec.Interface(v) +} + +// Interface decodes the JSON value within an object or an array to an interface{}. +func (dec *Decoder) Interface(value *interface{}) error { + err := dec.decodeInterface(value) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_number.go b/vendor/github.com/francoispqt/gojay/decode_number.go new file mode 100644 index 0000000..0042b47 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_number.go @@ -0,0 +1,118 @@ +package gojay + +import ( + "math" +) + +var digits []int8 + +const maxInt64toMultiply = math.MaxInt64 / 10 +const maxInt32toMultiply = math.MaxInt32 / 10 +const maxInt16toMultiply = math.MaxInt16 / 10 +const maxInt8toMultiply = math.MaxInt8 / 10 +const maxUint8toMultiply = math.MaxUint8 / 10 +const maxUint16toMultiply = math.MaxUint16 / 10 +const maxUint32toMultiply = math.MaxUint32 / 10 +const maxUint64toMultiply = math.MaxUint64 / 10 +const maxUint32Length = 10 +const maxUint64Length = 20 +const maxUint16Length = 5 +const maxUint8Length = 3 +const maxInt32Length = 10 +const maxInt64Length = 19 +const maxInt16Length = 5 +const maxInt8Length = 3 +const invalidNumber = int8(-1) + +var pow10uint64 = [21]uint64{ + 0, + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + 10000000000, + 100000000000, + 1000000000000, + 10000000000000, + 100000000000000, + 1000000000000000, + 10000000000000000, + 100000000000000000, + 1000000000000000000, + 10000000000000000000, +} + +var skipNumberEndCursorIncrement [256]int + +func init() { + digits = make([]int8, 256) + for i := 0; i < len(digits); i++ { + digits[i] = invalidNumber + } + for i := int8('0'); i <= int8('9'); i++ { + digits[i] = i - int8('0') + } + + for i := 0; i < 256; i++ { + switch i { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', 'e', 'E', '+', '-': + 
skipNumberEndCursorIncrement[i] = 1 + } + } +} + +func (dec *Decoder) skipNumber() (int, error) { + end := dec.cursor + 1 + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + end += skipNumberEndCursorIncrement[dec.data[j]] + + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', 'e', 'E', '+', '-', ' ', '\n', '\t', '\r': + continue + case ',', '}', ']': + return end, nil + default: + // invalid json we expect numbers, dot (single one), comma, or spaces + return end, dec.raiseInvalidJSONErr(dec.cursor) + } + } + + return end, nil +} + +func (dec *Decoder) getExponent() (int64, error) { + start := dec.cursor + end := dec.cursor + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { // is positive + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = dec.cursor + 1 + case '-': + dec.cursor++ + exp, err := dec.getExponent() + return -exp, err + case '+': + dec.cursor++ + return dec.getExponent() + default: + // if nothing return 0 + // could raise error + if start == end { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoi64(start, end-1), nil + } + } + if start == end { + + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoi64(start, end-1), nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_number_float.go b/vendor/github.com/francoispqt/gojay/decode_number_float.go new file mode 100644 index 0000000..f76c586 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_number_float.go @@ -0,0 +1,516 @@ +package gojay + +// DecodeFloat64 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the float64 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
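`DecodeFloat64`, documented just above, reads from the decoder's `io.Reader`. A minimal sketch, assuming the package's exported `BorrowDecoder`/`Release` pool helpers, which are defined in other files of this vendored copy:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/francoispqt/gojay"
)

func main() {
	// Assumed: BorrowDecoder wraps an io.Reader and Release returns the
	// decoder to the pool; both live outside this excerpt.
	dec := gojay.BorrowDecoder(strings.NewReader("3.14159"))
	defer dec.Release()

	var f float64
	if err := dec.DecodeFloat64(&f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f) // 3.14159
}
```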
+func (dec *Decoder) DecodeFloat64(v *float64) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeFloat64(v) +} +func (dec *Decoder) decodeFloat64(v *float64) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getFloat() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getFloatNegative() + if err != nil { + return err + } + *v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeFloat64Null(v **float64) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getFloat() + if err != nil { + return err + } + if *v == nil { + *v = new(float64) + } + **v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getFloatNegative() + if err != nil { + return err + } + if *v == nil { + *v = new(float64) + } + **v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getFloatNegative() (float64, error) { + // look for following numbers + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return dec.getFloat() + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getFloat() (float64, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case '.': + // we get part before decimal as integer + beforeDecimal := dec.atoi64(start, end) + // then we get part after decimal as integer + start = j + 1 + // get number after the decimal point + for i := j + 1; i < dec.length || dec.read(); i++ { + c := dec.data[i] + if isDigit(c) { + end = i + // multiply the before decimal point portion by 10 using bitwise + // make sure it doesn't overflow + if end-start < 18 { + beforeDecimal = (beforeDecimal << 3) + (beforeDecimal << 1) + } + continue + } else if (c == 'e' || c == 'E') && j < i-1 { + // we have an exponent, convert first the value we got before the exponent + var afterDecimal int64 + expI := end - start + 2 + // if exp is too long, it means number is too long, just truncate the number + if expI >= len(pow10uint64) || expI < 0 { + expI = len(pow10uint64) - 2 + afterDecimal = dec.atoi64(start, start+expI-2) + } else { + // then we add both integers + // then we divide the number by the power found + afterDecimal = dec.atoi64(start, end) + } + dec.cursor = i + 1 + pow := 
pow10uint64[expI] + floatVal := float64(beforeDecimal+afterDecimal) / float64(pow) + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // absolute exponent + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + // if exponent is negative + if exp < 0 { + return float64(floatVal) * (1 / float64(pow10uint64[pExp])), nil + } + return float64(floatVal) * float64(pow10uint64[pExp]), nil + } + dec.cursor = i + break + } + if end >= dec.length || end < start { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + var afterDecimal int64 + expI := end - start + 2 + // if exp is too long, it means number is too long, just truncate the number + if expI >= len(pow10uint64) || expI < 0 { + expI = 19 + afterDecimal = dec.atoi64(start, start+expI-2) + } else { + afterDecimal = dec.atoi64(start, end) + } + + pow := pow10uint64[expI] + // then we add both integers + // then we divide the number by the power found + return float64(beforeDecimal+afterDecimal) / float64(pow), nil + case 'e', 'E': + dec.cursor = j + 1 + // we get part before decimal as integer + beforeDecimal := uint64(dec.atoi64(start, end)) + // get exponent + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // abs + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + // if exponent is negative + if exp < 0 { + return float64(beforeDecimal) * (1 / float64(pow10uint64[pExp])), nil + } + return float64(beforeDecimal) * float64(pow10uint64[pExp]), nil + case ' ', '\n', '\t', '\r', ',', '}', ']': // does not have decimal + dec.cursor = j + return float64(dec.atoi64(start, end)), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return float64(dec.atoi64(start, end)), nil +} + +// DecodeFloat32 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the float32 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
+func (dec *Decoder) DecodeFloat32(v *float32) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeFloat32(v) +} +func (dec *Decoder) decodeFloat32(v *float32) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getFloat32() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getFloat32Negative() + if err != nil { + return err + } + *v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeFloat32Null(v **float32) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getFloat32() + if err != nil { + return err + } + if *v == nil { + *v = new(float32) + } + **v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getFloat32Negative() + if err != nil { + return err + } + if *v == nil { + *v = new(float32) + } + **v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getFloat32Negative() (float32, error) { + // look for following numbers + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return dec.getFloat32() + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getFloat32() (float32, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case '.': + // we get part before decimal as integer + beforeDecimal := dec.atoi64(start, end) + // then we get part after decimal as integer + start = j + 1 + // get number after the decimal point + // multiple the before decimal point portion by 10 using bitwise + for i := j + 1; i < dec.length || dec.read(); i++ { + c := dec.data[i] + if isDigit(c) { + end = i + // multiply the before decimal point portion by 10 using bitwise + // make sure it desn't overflow + if end-start < 9 { + beforeDecimal = (beforeDecimal << 3) + (beforeDecimal << 1) + } + continue + } else if (c == 'e' || c == 'E') && j < i-1 { + // we get the number before decimal + var afterDecimal int64 + expI := end - start + 2 + // if exp is too long, it means number is too long, just truncate the number + if expI >= 12 || expI < 0 { + expI = 10 + afterDecimal = dec.atoi64(start, start+expI-2) + } else { + afterDecimal = dec.atoi64(start, end) + } + dec.cursor = i + 1 + pow := pow10uint64[expI] + // then we add both integers + // then we divide the 
number by the power found + floatVal := float32(beforeDecimal+afterDecimal) / float32(pow) + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // abs + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + // if exponent is negative + if exp < 0 { + return float32(floatVal) * (1 / float32(pow10uint64[pExp])), nil + } + return float32(floatVal) * float32(pow10uint64[pExp]), nil + } + dec.cursor = i + break + } + if end >= dec.length || end < start { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + // then we add both integers + // then we divide the number by the power found + var afterDecimal int64 + expI := end - start + 2 + // if exp is too long, it means number is too long, just truncate the number + if expI >= 12 || expI < 0 { + expI = 10 + afterDecimal = dec.atoi64(start, start+expI-2) + } else { + // then we add both integers + // then we divide the number by the power found + afterDecimal = dec.atoi64(start, end) + } + pow := pow10uint64[expI] + return float32(beforeDecimal+afterDecimal) / float32(pow), nil + case 'e', 'E': + dec.cursor = j + 1 + // we get part before decimal as integer + beforeDecimal := dec.atoi64(start, end) + // get exponent + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + // if exponent is negative + if exp < 0 { + return float32(beforeDecimal) * (1 / float32(pow10uint64[pExp])), nil + } + return float32(beforeDecimal) * float32(pow10uint64[pExp]), nil + case ' ', '\n', '\t', '\r', ',', '}', ']': // does not have decimal + dec.cursor = j + return float32(dec.atoi64(start, end)), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return float32(dec.atoi64(start, end)), nil +} + +// Add Values functions + +// AddFloat decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddFloat(v *float64) error { + return dec.Float64(v) +} + +// AddFloatNull decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddFloatNull(v **float64) error { + return dec.Float64Null(v) +} + +// AddFloat64 decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddFloat64(v *float64) error { + return dec.Float64(v) +} + +// AddFloat64Null decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddFloat64Null(v **float64) error { + return dec.Float64Null(v) +} + +// AddFloat32 decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. 
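The `*Null` variants documented above take a pointer to a pointer so a JSON `null` can be told apart from a zero value: on `null` the pointer is left untouched, otherwise a new value is allocated. A small sketch with a hypothetical `metrics` type:

```go
package main

import (
	"fmt"

	"github.com/francoispqt/gojay"
)

// metrics is a hypothetical type; score stays nil when the JSON value is null.
type metrics struct {
	score *float64
}

func (m *metrics) UnmarshalJSONObject(dec *gojay.Decoder, key string) error {
	if key == "score" {
		// Float64Null allocates *m.score only when a number is present;
		// on `null` the pointer is not modified.
		return dec.Float64Null(&m.score)
	}
	return nil
}

func (m *metrics) NKeys() int { return 1 }

func main() {
	a, b := &metrics{}, &metrics{}
	_ = gojay.UnmarshalJSONObject([]byte(`{"score":0.75}`), a)
	_ = gojay.UnmarshalJSONObject([]byte(`{"score":null}`), b)
	fmt.Println(*a.score, b.score == nil) // 0.75 true
}
```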
+func (dec *Decoder) AddFloat32(v *float32) error { + return dec.Float32(v) +} + +// AddFloat32Null decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddFloat32Null(v **float32) error { + return dec.Float32Null(v) +} + +// Float decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Float(v *float64) error { + return dec.Float64(v) +} + +// FloatNull decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) FloatNull(v **float64) error { + return dec.Float64Null(v) +} + +// Float64 decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Float64(v *float64) error { + err := dec.decodeFloat64(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Float64Null decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Float64Null(v **float64) error { + err := dec.decodeFloat64Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Float32 decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Float32(v *float32) error { + err := dec.decodeFloat32(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Float32Null decodes the JSON value within an object or an array to a *float64. +// If next key value overflows float64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Float32Null(v **float32) error { + err := dec.decodeFloat32Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_number_int.go b/vendor/github.com/francoispqt/gojay/decode_number_int.go new file mode 100644 index 0000000..8429049 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_number_int.go @@ -0,0 +1,1338 @@ +package gojay + +import ( + "fmt" + "math" +) + +// DecodeInt reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the int pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
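`DecodeInt`, documented just above, has matching `*int` and `**int` cases in the package-level `Unmarshal` shown earlier in decode.go; the `**int` form keeps the pointer nil on a JSON `null`. A brief sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/francoispqt/gojay"
)

func main() {
	var n int
	if err := gojay.Unmarshal([]byte("42"), &n); err != nil {
		log.Fatal(err)
	}

	// The **int form leaves the pointer nil when the input is `null`.
	var maybe *int
	if err := gojay.Unmarshal([]byte("null"), &maybe); err != nil {
		log.Fatal(err)
	}
	fmt.Println(n, maybe == nil) // 42 true
}
```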
+func (dec *Decoder) DecodeInt(v *int) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeInt(v) +} +func (dec *Decoder) decodeInt(v *int) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + // we don't look for 0 as leading zeros are invalid per RFC + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt64() + if err != nil { + return err + } + *v = int(val) + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt64Negative() + if err != nil { + return err + } + *v = -int(val) + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = InvalidUnmarshalError( + fmt.Sprintf( + "Cannot unmarshall to int, wrong char '%s' found at pos %d", + string(dec.data[dec.cursor]), + dec.cursor, + ), + ) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) decodeIntNull(v **int) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + // we don't look for 0 as leading zeros are invalid per RFC + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt64() + if err != nil { + return err + } + if *v == nil { + *v = new(int) + } + **v = int(val) + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt64Negative() + if err != nil { + return err + } + if *v == nil { + *v = new(int) + } + **v = -int(val) + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = InvalidUnmarshalError( + fmt.Sprintf( + "Cannot unmarshall to int, wrong char '%s' found at pos %d", + string(dec.data[dec.cursor]), + dec.cursor, + ), + ) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +// DecodeInt16 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the int16 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
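The sized-integer decoders (`DecodeInt16`, `DecodeInt8`, and so on) follow the same pattern inside object unmarshalers. A hedged sketch with a hypothetical `point` type; `dec.Int16` is assumed to be the in-object helper defined further down in this file:

```go
package main

import (
	"fmt"

	"github.com/francoispqt/gojay"
)

// point is a hypothetical type using the sized-integer helpers.
type point struct {
	x, y int16
}

func (p *point) UnmarshalJSONObject(dec *gojay.Decoder, key string) error {
	switch key {
	case "x":
		return dec.Int16(&p.x) // assumed: Int16 is defined later in this file
	case "y":
		return dec.Int16(&p.y)
	}
	return nil
}

func (p *point) NKeys() int { return 2 }

func main() {
	p := &point{}
	_ = gojay.UnmarshalJSONObject([]byte(`{"x":3,"y":-7}`), p)
	fmt.Println(p.x, p.y) // 3 -7
}
```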
+func (dec *Decoder) DecodeInt16(v *int16) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeInt16(v) +} +func (dec *Decoder) decodeInt16(v *int16) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + // we don't look for 0 as leading zeros are invalid per RFC + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt16() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt16Negative() + if err != nil { + return err + } + *v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeInt16Null(v **int16) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + // we don't look for 0 as leading zeros are invalid per RFC + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt16() + if err != nil { + return err + } + if *v == nil { + *v = new(int16) + } + **v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt16Negative() + if err != nil { + return err + } + if *v == nil { + *v = new(int16) + } + **v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt16Negative() (int16, error) { + // look for following numbers + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + return dec.getInt16() + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt16() (int16, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case '.': + // if dot is found + // look for exponent (e,E) as exponent can change the + // way number should be parsed to int. 
+ // if no exponent found, just unmarshal the number before decimal point + j++ + startDecimal := j + endDecimal := j - 1 + for ; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + endDecimal = j + continue + case 'e', 'E': + if startDecimal > endDecimal { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + dec.cursor = j + 1 + // can try unmarshalling to int as Exponent might change decimal number to non decimal + // let's get the float value first + // we get part before decimal as integer + beforeDecimal := dec.atoi16(start, end) + // get number after the decimal point + // multiple the before decimal point portion by 10 using bitwise + for i := startDecimal; i <= endDecimal; i++ { + beforeDecimal = (beforeDecimal << 3) + (beforeDecimal << 1) + } + // then we add both integers + // then we divide the number by the power found + afterDecimal := dec.atoi16(startDecimal, endDecimal) + expI := endDecimal - startDecimal + 2 + if expI >= len(pow10uint64) || expI < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + pow := pow10uint64[expI] + floatVal := float64(beforeDecimal+afterDecimal) / float64(pow) + // we have the floating value, now multiply by the exponent + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // abs + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + val := floatVal * float64(pow10uint64[pExp]) + return int16(val), nil + case ' ', '\t', '\n', ',', ']', '}': + dec.cursor = j + return dec.atoi16(start, end), nil + default: + dec.cursor = j + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return dec.atoi16(start, end), nil + case 'e', 'E': + // get init n + dec.cursor = j + 1 + return dec.getInt16WithExp(dec.atoi16(start, end)) + case ' ', '\n', '\t', '\r', ',', '}', ']': + dec.cursor = j + return dec.atoi16(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoi16(start, end), nil +} + +func (dec *Decoder) getInt16WithExp(init int16) (int16, error) { + var exp uint16 + var sign = int16(1) + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '+': + continue + case '-': + sign = -1 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint16(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + dec.cursor++ + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint16(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + case ' ', '\t', '\n', '}', ',', ']': + exp = exp + 1 + if exp >= uint16(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int16(pow10uint64[exp])), nil + } + return init * int16(pow10uint64[exp]), nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + exp = exp + 1 + if exp >= uint16(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int16(pow10uint64[exp])), nil + } + return init * int16(pow10uint64[exp]), nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +// DecodeInt8 reads the next JSON-encoded value from the decoder's input (io.Reader) 
and stores it in the int8 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *Decoder) DecodeInt8(v *int8) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeInt8(v) +} +func (dec *Decoder) decodeInt8(v *int8) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + // we don't look for 0 as leading zeros are invalid per RFC + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt8() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt8Negative() + if err != nil { + return err + } + *v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeInt8Null(v **int8) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + // we don't look for 0 as leading zeros are invalid per RFC + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt8() + if err != nil { + return err + } + if *v == nil { + *v = new(int8) + } + **v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt8Negative() + if err != nil { + return err + } + if *v == nil { + *v = new(int8) + } + **v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt8Negative() (int8, error) { + // look for following numbers + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + return dec.getInt8() + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt8() (int8, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case '.': + // if dot is found + // look for exponent (e,E) as exponent can change the + // way number should be parsed to int. 
+ // if no exponent found, just unmarshal the number before decimal point + j++ + startDecimal := j + endDecimal := j - 1 + for ; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + endDecimal = j + continue + case 'e', 'E': + if startDecimal > endDecimal { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + dec.cursor = j + 1 + // can try unmarshalling to int as Exponent might change decimal number to non decimal + // let's get the float value first + // we get part before decimal as integer + beforeDecimal := dec.atoi8(start, end) + // get number after the decimal point + // multiple the before decimal point portion by 10 using bitwise + for i := startDecimal; i <= endDecimal; i++ { + beforeDecimal = (beforeDecimal << 3) + (beforeDecimal << 1) + } + // then we add both integers + // then we divide the number by the power found + afterDecimal := dec.atoi8(startDecimal, endDecimal) + expI := endDecimal - startDecimal + 2 + if expI >= len(pow10uint64) || expI < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + pow := pow10uint64[expI] + floatVal := float64(beforeDecimal+afterDecimal) / float64(pow) + // we have the floating value, now multiply by the exponent + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // abs + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + val := floatVal * float64(pow10uint64[pExp]) + return int8(val), nil + case ' ', '\t', '\n', ',', ']', '}': + dec.cursor = j + return dec.atoi8(start, end), nil + default: + dec.cursor = j + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return dec.atoi8(start, end), nil + case 'e', 'E': + // get init n + dec.cursor = j + 1 + return dec.getInt8WithExp(dec.atoi8(start, end)) + case ' ', '\n', '\t', '\r', ',', '}', ']': + dec.cursor = j + return dec.atoi8(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoi8(start, end), nil +} + +func (dec *Decoder) getInt8WithExp(init int8) (int8, error) { + var exp uint8 + var sign = int8(1) + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '+': + continue + case '-': + sign = -1 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint8(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + dec.cursor++ + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint8(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + case ' ', '\t', '\n', '}', ',', ']': + if exp+1 >= uint8(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int8(pow10uint64[exp+1])), nil + } + return init * int8(pow10uint64[exp+1]), nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + if exp+1 >= uint8(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int8(pow10uint64[exp+1])), nil + } + return init * int8(pow10uint64[exp+1]), nil + default: + dec.err = dec.raiseInvalidJSONErr(dec.cursor) + return 0, dec.err + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +// DecodeInt32 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the 
int32 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *Decoder) DecodeInt32(v *int32) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeInt32(v) +} +func (dec *Decoder) decodeInt32(v *int32) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt32() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt32Negative() + if err != nil { + return err + } + *v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeInt32Null(v **int32) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt32() + if err != nil { + return err + } + if *v == nil { + *v = new(int32) + } + **v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt32Negative() + if err != nil { + return err + } + if *v == nil { + *v = new(int32) + } + **v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt32Negative() (int32, error) { + // look for following numbers + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + return dec.getInt32() + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt32() (int32, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case '.': + // if dot is found + // look for exponent (e,E) as exponent can change the + // way number should be parsed to int. 
+ // if no exponent found, just unmarshal the number before decimal point + j++ + startDecimal := j + endDecimal := j - 1 + for ; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + endDecimal = j + continue + case 'e', 'E': + // if eg 1.E + if startDecimal > endDecimal { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + dec.cursor = j + 1 + // can try unmarshalling to int as Exponent might change decimal number to non decimal + // let's get the float value first + // we get part before decimal as integer + beforeDecimal := dec.atoi64(start, end) + // get number after the decimal point + // multiple the before decimal point portion by 10 using bitwise + for i := startDecimal; i <= endDecimal; i++ { + beforeDecimal = (beforeDecimal << 3) + (beforeDecimal << 1) + } + // then we add both integers + // then we divide the number by the power found + afterDecimal := dec.atoi64(startDecimal, endDecimal) + expI := endDecimal - startDecimal + 2 + if expI >= len(pow10uint64) || expI < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + pow := pow10uint64[expI] + floatVal := float64(beforeDecimal+afterDecimal) / float64(pow) + // we have the floating value, now multiply by the exponent + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // abs + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + val := floatVal * float64(pow10uint64[pExp]) + return int32(val), nil + case ' ', '\t', '\n', ',', ']', '}': + dec.cursor = j + return dec.atoi32(start, end), nil + default: + dec.cursor = j + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return dec.atoi32(start, end), nil + case 'e', 'E': + // get init n + dec.cursor = j + 1 + return dec.getInt32WithExp(dec.atoi32(start, end)) + case ' ', '\n', '\t', '\r', ',', '}', ']': + dec.cursor = j + return dec.atoi32(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoi32(start, end), nil +} + +func (dec *Decoder) getInt32WithExp(init int32) (int32, error) { + var exp uint32 + var sign = int32(1) + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '+': + continue + case '-': + sign = -1 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint32(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + dec.cursor++ + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint32(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + case ' ', '\t', '\n', '}', ',', ']': + if exp+1 >= uint32(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int32(pow10uint64[exp+1])), nil + } + return init * int32(pow10uint64[exp+1]), nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + if exp+1 >= uint32(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int32(pow10uint64[exp+1])), nil + } + return init * int32(pow10uint64[exp+1]), nil + default: + dec.err = dec.raiseInvalidJSONErr(dec.cursor) + return 0, dec.err + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +// DecodeInt64 reads the next JSON-encoded value from the decoder's 
input (io.Reader) and stores it in the int64 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *Decoder) DecodeInt64(v *int64) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeInt64(v) +} + +func (dec *Decoder) decodeInt64(v *int64) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt64() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt64Negative() + if err != nil { + return err + } + *v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeInt64Null(v **int64) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getInt64() + if err != nil { + return err + } + if *v == nil { + *v = new(int64) + } + **v = val + return nil + case '-': + dec.cursor = dec.cursor + 1 + val, err := dec.getInt64Negative() + if err != nil { + return err + } + if *v == nil { + *v = new(int64) + } + **v = -val + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt64Negative() (int64, error) { + // look for following numbers + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + return dec.getInt64() + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getInt64() (int64, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case ' ', '\t', '\n', ',', '}', ']': + dec.cursor = j + return dec.atoi64(start, end), nil + case '.': + // if dot is found + // look for exponent (e,E) as exponent can change the + // way number should be parsed to int. 
+ // if no exponent found, just unmarshal the number before decimal point + j++ + startDecimal := j + endDecimal := j - 1 + for ; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + endDecimal = j + continue + case 'e', 'E': + // if eg 1.E + if startDecimal > endDecimal { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + dec.cursor = j + 1 + // can try unmarshalling to int as Exponent might change decimal number to non decimal + // let's get the float value first + // we get part before decimal as integer + beforeDecimal := dec.atoi64(start, end) + // get number after the decimal point + // multiple the before decimal point portion by 10 using bitwise + for i := startDecimal; i <= endDecimal; i++ { + beforeDecimal = (beforeDecimal << 3) + (beforeDecimal << 1) + } + // then we add both integers + // then we divide the number by the power found + afterDecimal := dec.atoi64(startDecimal, endDecimal) + expI := endDecimal - startDecimal + 2 + if expI >= len(pow10uint64) || expI < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + pow := pow10uint64[expI] + floatVal := float64(beforeDecimal+afterDecimal) / float64(pow) + // we have the floating value, now multiply by the exponent + exp, err := dec.getExponent() + if err != nil { + return 0, err + } + pExp := (exp + (exp >> 31)) ^ (exp >> 31) + 1 // abs + if pExp >= int64(len(pow10uint64)) || pExp < 0 { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + val := floatVal * float64(pow10uint64[pExp]) + return int64(val), nil + case ' ', '\t', '\n', ',', ']', '}': + dec.cursor = j + return dec.atoi64(start, end), nil + default: + dec.cursor = j + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return dec.atoi64(start, end), nil + case 'e', 'E': + // get init n + dec.cursor = j + 1 + return dec.getInt64WithExp(dec.atoi64(start, end)) + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoi64(start, end), nil +} + +func (dec *Decoder) getInt64WithExp(init int64) (int64, error) { + var exp uint64 + var sign = int64(1) + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '+': + continue + case '-': + sign = -1 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint64(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + dec.cursor++ + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + uintv := uint64(digits[dec.data[dec.cursor]]) + exp = (exp << 3) + (exp << 1) + uintv + case ' ', '\t', '\n', '}', ',', ']': + if exp+1 >= uint64(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int64(pow10uint64[exp+1])), nil + } + return init * int64(pow10uint64[exp+1]), nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + if exp+1 >= uint64(len(pow10uint64)) { + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + if sign == -1 { + return init * (1 / int64(pow10uint64[exp+1])), nil + } + return init * int64(pow10uint64[exp+1]), nil + default: + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) atoi64(start, end int) int64 { + var ll = end + 1 - start + var val = int64(digits[dec.data[start]]) + end = end + 1 + if ll < maxInt64Length { + for i := start + 1; i < 
end; i++ { + intv := int64(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + intv + } + return val + } else if ll == maxInt64Length { + for i := start + 1; i < end; i++ { + intv := int64(digits[dec.data[i]]) + if val > maxInt64toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxInt64-val < intv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += intv + } + } else { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + return val +} + +func (dec *Decoder) atoi32(start, end int) int32 { + var ll = end + 1 - start + var val = int32(digits[dec.data[start]]) + end = end + 1 + + // overflowing + if ll < maxInt32Length { + for i := start + 1; i < end; i++ { + intv := int32(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + intv + } + } else if ll == maxInt32Length { + for i := start + 1; i < end; i++ { + intv := int32(digits[dec.data[i]]) + if val > maxInt32toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxInt32-val < intv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += intv + } + } else { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + return val +} + +func (dec *Decoder) atoi16(start, end int) int16 { + var ll = end + 1 - start + var val = int16(digits[dec.data[start]]) + end = end + 1 + // overflowing + if ll < maxInt16Length { + for i := start + 1; i < end; i++ { + intv := int16(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + intv + } + } else if ll == maxInt16Length { + for i := start + 1; i < end; i++ { + intv := int16(digits[dec.data[i]]) + if val > maxInt16toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxInt16-val < intv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += intv + } + } else { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + return val +} + +func (dec *Decoder) atoi8(start, end int) int8 { + var ll = end + 1 - start + var val = int8(digits[dec.data[start]]) + end = end + 1 + // overflowing + if ll < maxInt8Length { + for i := start + 1; i < end; i++ { + intv := int8(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + intv + } + } else if ll == maxInt8Length { + for i := start + 1; i < end; i++ { + intv := int8(digits[dec.data[i]]) + if val > maxInt8toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxInt8-val < intv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += intv + } + } else { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + return val +} + +// Add Values functions + +// AddInt decodes the JSON value within an object or an array to an *int. +// If next key value overflows int, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddInt(v *int) error { + return dec.Int(v) +} + +// AddIntNull decodes the JSON value within an object or an array to an *int. +// If next key value overflows int, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddIntNull(v **int) error { + return dec.IntNull(v) +} + +// AddInt8 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int8, an InvalidUnmarshalError error will be returned. 
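+//
+// A minimal usage sketch inside an UnmarshalerJSONObject implementation (the
+// Coord type and its "x" key are assumptions made up for illustration; NKeys
+// is omitted for brevity):
+//
+//    func (c *Coord) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
+//        if k == "x" {
+//            return dec.AddInt8(&c.X) // c.X is an int8
+//        }
+//        return nil
+//    }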
+func (dec *Decoder) AddInt8(v *int8) error { + return dec.Int8(v) +} + +// AddInt8Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int8, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddInt8Null(v **int8) error { + return dec.Int8Null(v) +} + +// AddInt16 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int16, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddInt16(v *int16) error { + return dec.Int16(v) +} + +// AddInt16Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int16, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddInt16Null(v **int16) error { + return dec.Int16Null(v) +} + +// AddInt32 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int32, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddInt32(v *int32) error { + return dec.Int32(v) +} + +// AddInt32Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int32, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddInt32Null(v **int32) error { + return dec.Int32Null(v) +} + +// AddInt64 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddInt64(v *int64) error { + return dec.Int64(v) +} + +// AddInt64Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int64, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddInt64Null(v **int64) error { + return dec.Int64Null(v) +} + +// Int decodes the JSON value within an object or an array to an *int. +// If next key value overflows int, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int(v *int) error { + err := dec.decodeInt(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// IntNull decodes the JSON value within an object or an array to an *int. +// If next key value overflows int, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) IntNull(v **int) error { + err := dec.decodeIntNull(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int8 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int8, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int8(v *int8) error { + err := dec.decodeInt8(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int8Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int8, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int8Null(v **int8) error { + err := dec.decodeInt8Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int16 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int16, an InvalidUnmarshalError error will be returned. 
+func (dec *Decoder) Int16(v *int16) error { + err := dec.decodeInt16(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int16Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int16, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int16Null(v **int16) error { + err := dec.decodeInt16Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int32 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int32, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int32(v *int32) error { + err := dec.decodeInt32(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int32Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int32, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int32Null(v **int32) error { + err := dec.decodeInt32Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int64 decodes the JSON value within an object or an array to an *int. +// If next key value overflows int64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int64(v *int64) error { + err := dec.decodeInt64(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Int64Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows int64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Int64Null(v **int64) error { + err := dec.decodeInt64Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_number_uint.go b/vendor/github.com/francoispqt/gojay/decode_number_uint.go new file mode 100644 index 0000000..b57ef7a --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_number_uint.go @@ -0,0 +1,715 @@ +package gojay + +import ( + "math" +) + +// DecodeUint8 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the uint8 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
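+//
+// Caller-side sketch (the strings.NewReader input is an assumption for
+// illustration):
+//
+//    dec := gojay.NewDecoder(strings.NewReader("250"))
+//    var n uint8
+//    err := dec.DecodeUint8(&n) // n == 250 when err == nil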
+func (dec *Decoder) DecodeUint8(v *uint8) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeUint8(v) +} + +func (dec *Decoder) decodeUint8(v *uint8) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint8() + if err != nil { + return err + } + *v = val + return nil + case '-': // if negative, we just set it to 0 and set error + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeUint8Null(v **uint8) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint8() + if err != nil { + return err + } + if *v == nil { + *v = new(uint8) + } + **v = val + return nil + case '-': // if negative, we just set it to 0 and set error + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + if *v == nil { + *v = new(uint8) + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getUint8() (uint8, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case ' ', '\n', '\t', '\r': + continue + case '.', ',', '}', ']': + dec.cursor = j + return dec.atoui8(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoui8(start, end), nil +} + +// DecodeUint16 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the uint16 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
+func (dec *Decoder) DecodeUint16(v *uint16) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeUint16(v) +} + +func (dec *Decoder) decodeUint16(v *uint16) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint16() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeUint16Null(v **uint16) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint16() + if err != nil { + return err + } + if *v == nil { + *v = new(uint16) + } + **v = val + return nil + case '-': + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + if *v == nil { + *v = new(uint16) + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getUint16() (uint16, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case ' ', '\n', '\t', '\r': + continue + case '.', ',', '}', ']': + dec.cursor = j + return dec.atoui16(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoui16(start, end), nil +} + +// DecodeUint32 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the uint32 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
+func (dec *Decoder) DecodeUint32(v *uint32) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeUint32(v) +} + +func (dec *Decoder) decodeUint32(v *uint32) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint32() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeUint32Null(v **uint32) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint32() + if err != nil { + return err + } + if *v == nil { + *v = new(uint32) + } + **v = val + return nil + case '-': + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + if *v == nil { + *v = new(uint32) + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getUint32() (uint32, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case ' ', '\n', '\t', '\r': + continue + case '.', ',', '}', ']': + dec.cursor = j + return dec.atoui32(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoui32(start, end), nil +} + +// DecodeUint64 reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the uint64 pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
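+//
+// Caller-side sketch (input literal is an assumption). Note that a leading
+// '-' is not valid for an unsigned target: the decoder records an
+// InvalidUnmarshalError and leaves the target untouched.
+//
+//    dec := gojay.NewDecoder(strings.NewReader("42"))
+//    var n uint64
+//    err := dec.DecodeUint64(&n) // n == 42 when err == nil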
+func (dec *Decoder) DecodeUint64(v *uint64) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeUint64(v) +} +func (dec *Decoder) decodeUint64(v *uint64) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint64() + if err != nil { + return err + } + *v = val + return nil + case '-': + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} +func (dec *Decoder) decodeUint64Null(v **uint64) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch c := dec.data[dec.cursor]; c { + case ' ', '\n', '\t', '\r', ',': + continue + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + val, err := dec.getUint64() + if err != nil { + return err + } + if *v == nil { + *v = new(uint64) + } + **v = val + return nil + case '-': + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + if *v == nil { + *v = new(uint64) + } + return nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) getUint64() (uint64, error) { + var end = dec.cursor + var start = dec.cursor + // look for following numbers + for j := dec.cursor + 1; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + end = j + continue + case ' ', '\n', '\t', '\r', '.', ',', '}', ']': + dec.cursor = j + return dec.atoui64(start, end), nil + } + // invalid json we expect numbers, dot (single one), comma, or spaces + return 0, dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.atoui64(start, end), nil +} + +func (dec *Decoder) atoui64(start, end int) uint64 { + var ll = end + 1 - start + var val = uint64(digits[dec.data[start]]) + end = end + 1 + if ll < maxUint64Length { + for i := start + 1; i < end; i++ { + uintv := uint64(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + uintv + } + } else if ll == maxUint64Length { + for i := start + 1; i < end; i++ { + uintv := uint64(digits[dec.data[i]]) + if val > maxUint64toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxUint64-val < uintv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += uintv + } + } else { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + return val +} + +func (dec *Decoder) atoui32(start, end int) uint32 { + var ll = end + 1 - start + var val uint32 + val = uint32(digits[dec.data[start]]) + end = end + 1 + if ll < maxUint32Length { + for i := start + 1; i < end; i++ { + uintv := uint32(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + uintv + } + } else if ll == maxUint32Length { + for i := start + 1; i < end; i++ { + uintv := uint32(digits[dec.data[i]]) + if val > maxUint32toMultiply { + dec.err = 
dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxUint32-val < uintv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += uintv + } + } else if ll > maxUint32Length { + dec.err = dec.makeInvalidUnmarshalErr(val) + val = 0 + } + return val +} + +func (dec *Decoder) atoui16(start, end int) uint16 { + var ll = end + 1 - start + var val uint16 + val = uint16(digits[dec.data[start]]) + end = end + 1 + if ll < maxUint16Length { + for i := start + 1; i < end; i++ { + uintv := uint16(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + uintv + } + } else if ll == maxUint16Length { + for i := start + 1; i < end; i++ { + uintv := uint16(digits[dec.data[i]]) + if val > maxUint16toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxUint16-val < uintv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += uintv + } + } else if ll > maxUint16Length { + dec.err = dec.makeInvalidUnmarshalErr(val) + val = 0 + } + return val +} + +func (dec *Decoder) atoui8(start, end int) uint8 { + var ll = end + 1 - start + var val uint8 + val = uint8(digits[dec.data[start]]) + end = end + 1 + if ll < maxUint8Length { + for i := start + 1; i < end; i++ { + uintv := uint8(digits[dec.data[i]]) + val = (val << 3) + (val << 1) + uintv + } + } else if ll == maxUint8Length { + for i := start + 1; i < end; i++ { + uintv := uint8(digits[dec.data[i]]) + if val > maxUint8toMultiply { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val = (val << 3) + (val << 1) + if math.MaxUint8-val < uintv { + dec.err = dec.makeInvalidUnmarshalErr(val) + return 0 + } + val += uintv + } + } else if ll > maxUint8Length { + dec.err = dec.makeInvalidUnmarshalErr(val) + val = 0 + } + return val +} + +// Add Values functions + +// AddUint8 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint8, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddUint8(v *uint8) error { + return dec.Uint8(v) +} + +// AddUint8Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint8, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddUint8Null(v **uint8) error { + return dec.Uint8Null(v) +} + +// AddUint16 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint16, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddUint16(v *uint16) error { + return dec.Uint16(v) +} + +// AddUint16Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint16, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddUint16Null(v **uint16) error { + return dec.Uint16Null(v) +} + +// AddUint32 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint32, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddUint32(v *uint32) error { + return dec.Uint32(v) +} + +// AddUint32Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint32, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. 
+func (dec *Decoder) AddUint32Null(v **uint32) error { + return dec.Uint32Null(v) +} + +// AddUint64 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) AddUint64(v *uint64) error { + return dec.Uint64(v) +} + +// AddUint64Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint64, an InvalidUnmarshalError error will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddUint64Null(v **uint64) error { + return dec.Uint64Null(v) +} + +// Uint8 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint8, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint8(v *uint8) error { + err := dec.decodeUint8(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint8Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint8, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint8Null(v **uint8) error { + err := dec.decodeUint8Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint16 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint16, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint16(v *uint16) error { + err := dec.decodeUint16(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint16Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint16, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint16Null(v **uint16) error { + err := dec.decodeUint16Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint32 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint32, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint32(v *uint32) error { + err := dec.decodeUint32(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint32Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint32, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint32Null(v **uint32) error { + err := dec.decodeUint32Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint64 decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint64, an InvalidUnmarshalError error will be returned. +func (dec *Decoder) Uint64(v *uint64) error { + err := dec.decodeUint64(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// Uint64Null decodes the JSON value within an object or an array to an *int. +// If next key value overflows uint64, an InvalidUnmarshalError error will be returned. 
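+//
+// Sketch inside an object unmarshaler (the "count" key and o.Count field are
+// assumptions): on a JSON null the pointer is left untouched, otherwise a new
+// uint64 is allocated and filled.
+//
+//    case "count":
+//        return dec.Uint64Null(&o.Count) // o.Count is of type *uint64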
+func (dec *Decoder) Uint64Null(v **uint64) error { + err := dec.decodeUint64Null(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_object.go b/vendor/github.com/francoispqt/gojay/decode_object.go new file mode 100644 index 0000000..0fec9d2 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_object.go @@ -0,0 +1,407 @@ +package gojay + +import ( + "reflect" + "unsafe" +) + +// DecodeObject reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the value pointed to by v. +// +// v must implement UnmarshalerJSONObject. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *Decoder) DecodeObject(j UnmarshalerJSONObject) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + _, err := dec.decodeObject(j) + return err +} +func (dec *Decoder) decodeObject(j UnmarshalerJSONObject) (int, error) { + keys := j.NKeys() + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + case '{': + dec.cursor = dec.cursor + 1 + // if keys is zero we will parse all keys + // we run two loops for micro optimization + if keys == 0 { + for dec.cursor < dec.length || dec.read() { + k, done, err := dec.nextKey() + if err != nil { + return 0, err + } else if done { + return dec.cursor, nil + } + err = j.UnmarshalJSONObject(dec, k) + if err != nil { + dec.err = err + return 0, err + } else if dec.called&1 == 0 { + err := dec.skipData() + if err != nil { + return 0, err + } + } else { + dec.keysDone++ + } + dec.called &= 0 + } + } else { + for (dec.cursor < dec.length || dec.read()) && dec.keysDone < keys { + k, done, err := dec.nextKey() + if err != nil { + return 0, err + } else if done { + return dec.cursor, nil + } + err = j.UnmarshalJSONObject(dec, k) + if err != nil { + dec.err = err + return 0, err + } else if dec.called&1 == 0 { + err := dec.skipData() + if err != nil { + return 0, err + } + } else { + dec.keysDone++ + } + dec.called &= 0 + } + } + // will get to that point when keysDone is not lower than keys anymore + // in that case, we make sure cursor goes to the end of object, but we skip + // unmarshalling + if dec.child&1 != 0 { + end, err := dec.skipObject() + dec.cursor = end + return dec.cursor, err + } + return dec.cursor, nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return 0, err + } + return dec.cursor, nil + default: + // can't unmarshal to struct + dec.err = dec.makeInvalidUnmarshalErr(j) + err := dec.skipData() + if err != nil { + return 0, err + } + return dec.cursor, nil + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) decodeObjectNull(v interface{}) (int, error) { + // make sure the value is a pointer + vv := reflect.ValueOf(v) + vvt := vv.Type() + if vvt.Kind() != reflect.Ptr || vvt.Elem().Kind() != reflect.Ptr { + dec.err = ErrUnmarshalPtrExpected + return 0, dec.err + } + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + case '{': + elt := vv.Elem() + n := reflect.New(elt.Type().Elem()) + elt.Set(n) + var j UnmarshalerJSONObject + var ok bool + if j, ok = n.Interface().(UnmarshalerJSONObject); !ok { + dec.err = dec.makeInvalidUnmarshalErr((UnmarshalerJSONObject)(nil)) + return 0, dec.err + } + keys := j.NKeys() + dec.cursor = dec.cursor + 1 
+ // if keys is zero we will parse all keys + // we run two loops for micro optimization + if keys == 0 { + for dec.cursor < dec.length || dec.read() { + k, done, err := dec.nextKey() + if err != nil { + return 0, err + } else if done { + return dec.cursor, nil + } + err = j.UnmarshalJSONObject(dec, k) + if err != nil { + dec.err = err + return 0, err + } else if dec.called&1 == 0 { + err := dec.skipData() + if err != nil { + return 0, err + } + } else { + dec.keysDone++ + } + dec.called &= 0 + } + } else { + for (dec.cursor < dec.length || dec.read()) && dec.keysDone < keys { + k, done, err := dec.nextKey() + if err != nil { + return 0, err + } else if done { + return dec.cursor, nil + } + err = j.UnmarshalJSONObject(dec, k) + if err != nil { + dec.err = err + return 0, err + } else if dec.called&1 == 0 { + err := dec.skipData() + if err != nil { + return 0, err + } + } else { + dec.keysDone++ + } + dec.called &= 0 + } + } + // will get to that point when keysDone is not lower than keys anymore + // in that case, we make sure cursor goes to the end of object, but we skip + // unmarshalling + if dec.child&1 != 0 { + end, err := dec.skipObject() + dec.cursor = end + return dec.cursor, err + } + return dec.cursor, nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return 0, err + } + return dec.cursor, nil + default: + // can't unmarshal to struct + dec.err = dec.makeInvalidUnmarshalErr((UnmarshalerJSONObject)(nil)) + err := dec.skipData() + if err != nil { + return 0, err + } + return dec.cursor, nil + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) skipObject() (int, error) { + var objectsOpen = 1 + var objectsClosed = 0 + for j := dec.cursor; j < dec.length || dec.read(); j++ { + switch dec.data[j] { + case '}': + objectsClosed++ + // everything is closed return + if objectsOpen == objectsClosed { + // add char to object data + return j + 1, nil + } + case '{': + objectsOpen++ + case '"': + j++ + var isInEscapeSeq bool + var isFirstQuote = true + for ; j < dec.length || dec.read(); j++ { + if dec.data[j] != '"' { + continue + } + if dec.data[j-1] != '\\' || (!isInEscapeSeq && !isFirstQuote) { + break + } else { + isInEscapeSeq = false + } + if isFirstQuote { + isFirstQuote = false + } + // loop backward and count how many anti slash found + // to see if string is effectively escaped + ct := 0 + for i := j - 1; i > 0; i-- { + if dec.data[i] != '\\' { + break + } + ct++ + } + // is pair number of slashes, quote is not escaped + if ct&1 == 0 { + break + } + isInEscapeSeq = true + } + default: + continue + } + } + return 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) nextKey() (string, bool, error) { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + case '"': + dec.cursor = dec.cursor + 1 + start, end, err := dec.getString() + if err != nil { + return "", false, err + } + var found byte + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + if dec.data[dec.cursor] == ':' { + found |= 1 + break + } + } + if found&1 != 0 { + dec.cursor++ + d := dec.data[start : end-1] + return *(*string)(unsafe.Pointer(&d)), false, nil + } + return "", false, dec.raiseInvalidJSONErr(dec.cursor) + case '}': + dec.cursor = dec.cursor + 1 + return "", true, nil + default: + // can't unmarshall to struct + return "", false, dec.raiseInvalidJSONErr(dec.cursor) + } + } + return "", false, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec 
*Decoder) skipData() error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + // is null + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + case 't': + dec.cursor++ + err := dec.assertTrue() + if err != nil { + return err + } + return nil + // is false + case 'f': + dec.cursor++ + err := dec.assertFalse() + if err != nil { + return err + } + return nil + // is an object + case '{': + dec.cursor = dec.cursor + 1 + end, err := dec.skipObject() + dec.cursor = end + return err + // is string + case '"': + dec.cursor = dec.cursor + 1 + err := dec.skipString() + return err + // is array + case '[': + dec.cursor = dec.cursor + 1 + end, err := dec.skipArray() + dec.cursor = end + return err + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + end, err := dec.skipNumber() + dec.cursor = end + return err + } + return dec.raiseInvalidJSONErr(dec.cursor) + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +// DecodeObjectFunc is a func type implementing UnmarshalerJSONObject. +// Use it to cast a `func(*Decoder, k string) error` to Unmarshal an object on the fly. +type DecodeObjectFunc func(*Decoder, string) error + +// UnmarshalJSONObject implements UnmarshalerJSONObject. +func (f DecodeObjectFunc) UnmarshalJSONObject(dec *Decoder, k string) error { + return f(dec, k) +} + +// NKeys implements UnmarshalerJSONObject. +func (f DecodeObjectFunc) NKeys() int { + return 0 +} + +// Add Values functions + +// AddObject decodes the JSON value within an object or an array to a UnmarshalerJSONObject. +func (dec *Decoder) AddObject(v UnmarshalerJSONObject) error { + return dec.Object(v) +} + +// AddObjectNull decodes the JSON value within an object or an array to a UnmarshalerJSONObject. +func (dec *Decoder) AddObjectNull(v interface{}) error { + return dec.ObjectNull(v) +} + +// Object decodes the JSON value within an object or an array to a UnmarshalerJSONObject. +func (dec *Decoder) Object(value UnmarshalerJSONObject) error { + initialKeysDone := dec.keysDone + initialChild := dec.child + dec.keysDone = 0 + dec.called = 0 + dec.child |= 1 + newCursor, err := dec.decodeObject(value) + if err != nil { + return err + } + dec.cursor = newCursor + dec.keysDone = initialKeysDone + dec.child = initialChild + dec.called |= 1 + return nil +} + +// ObjectNull decodes the JSON value within an object or an array to a UnmarshalerJSONObject. +// v should be a pointer to an UnmarshalerJSONObject, +// if `null` value is encountered in JSON, it will leave the value v untouched, +// else it will create a new instance of the UnmarshalerJSONObject behind v. 
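+//
+// Sketch inside a parent object's UnmarshalJSONObject (the Child type and the
+// "child" key are assumptions; *Child must implement UnmarshalerJSONObject):
+//
+//    case "child":
+//        return dec.ObjectNull(&o.Child) // o.Child is of type *Child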
+func (dec *Decoder) ObjectNull(v interface{}) error { + initialKeysDone := dec.keysDone + initialChild := dec.child + dec.keysDone = 0 + dec.called = 0 + dec.child |= 1 + newCursor, err := dec.decodeObjectNull(v) + if err != nil { + return err + } + dec.cursor = newCursor + dec.keysDone = initialKeysDone + dec.child = initialChild + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_pool.go b/vendor/github.com/francoispqt/gojay/decode_pool.go new file mode 100644 index 0000000..68c5713 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_pool.go @@ -0,0 +1,64 @@ +package gojay + +import ( + "io" + "sync" +) + +var decPool = sync.Pool{ + New: newDecoderPool, +} + +func init() { + for i := 0; i < 32; i++ { + decPool.Put(NewDecoder(nil)) + } +} + +// NewDecoder returns a new decoder. +// It takes an io.Reader implementation as data input. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + called: 0, + cursor: 0, + keysDone: 0, + err: nil, + r: r, + data: make([]byte, 512), + length: 0, + isPooled: 0, + } +} +func newDecoderPool() interface{} { + return NewDecoder(nil) +} + +// BorrowDecoder borrows a Decoder from the pool. +// It takes an io.Reader implementation as data input. +// +// In order to benefit from the pool, a borrowed decoder must be released after usage. +func BorrowDecoder(r io.Reader) *Decoder { + return borrowDecoder(r, 512) +} +func borrowDecoder(r io.Reader, bufSize int) *Decoder { + dec := decPool.Get().(*Decoder) + dec.called = 0 + dec.keysDone = 0 + dec.cursor = 0 + dec.err = nil + dec.r = r + dec.length = 0 + dec.isPooled = 0 + if bufSize > 0 { + dec.data = make([]byte, bufSize) + } + return dec +} + +// Release sends back a Decoder to the pool. +// If a decoder is used after calling Release +// a panic will be raised with an InvalidUsagePooledDecoderError error. 
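+//
+// Typical borrow/release lifecycle (caller-side sketch; r is any io.Reader):
+//
+//    dec := gojay.BorrowDecoder(r)
+//    defer dec.Release()
+//    // ... decode with dec ...
+//    // the exported Decode* methods panic with an
+//    // InvalidUsagePooledDecoderError if called after Release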
+func (dec *Decoder) Release() { + dec.isPooled = 1 + decPool.Put(dec) +} diff --git a/vendor/github.com/francoispqt/gojay/decode_slice.go b/vendor/github.com/francoispqt/gojay/decode_slice.go new file mode 100644 index 0000000..dbbb4bf --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_slice.go @@ -0,0 +1,89 @@ +package gojay + +// AddSliceString unmarshals the next JSON array of strings to the given *[]string s +func (dec *Decoder) AddSliceString(s *[]string) error { + return dec.SliceString(s) +} + +// SliceString unmarshals the next JSON array of strings to the given *[]string s +func (dec *Decoder) SliceString(s *[]string) error { + err := dec.Array(DecodeArrayFunc(func(dec *Decoder) error { + var str string + if err := dec.String(&str); err != nil { + return err + } + *s = append(*s, str) + return nil + })) + + if err != nil { + return err + } + return nil +} + +// AddSliceInt unmarshals the next JSON array of integers to the given *[]int s +func (dec *Decoder) AddSliceInt(s *[]int) error { + return dec.SliceInt(s) +} + +// SliceInt unmarshals the next JSON array of integers to the given *[]int s +func (dec *Decoder) SliceInt(s *[]int) error { + err := dec.Array(DecodeArrayFunc(func(dec *Decoder) error { + var i int + if err := dec.Int(&i); err != nil { + return err + } + *s = append(*s, i) + return nil + })) + + if err != nil { + return err + } + return nil +} + +// AddSliceFloat64 unmarshals the next JSON array of floats to the given *[]float64 s +func (dec *Decoder) AddSliceFloat64(s *[]float64) error { + return dec.SliceFloat64(s) +} + +// SliceFloat64 unmarshals the next JSON array of floats to the given *[]float64 s +func (dec *Decoder) SliceFloat64(s *[]float64) error { + err := dec.Array(DecodeArrayFunc(func(dec *Decoder) error { + var i float64 + if err := dec.Float64(&i); err != nil { + return err + } + *s = append(*s, i) + return nil + })) + + if err != nil { + return err + } + return nil +} + +// AddSliceBool unmarshals the next JSON array of booleans to the given *[]bool s +func (dec *Decoder) AddSliceBool(s *[]bool) error { + return dec.SliceBool(s) +} + +// SliceBool unmarshals the next JSON array of booleans to the given *[]bool s +func (dec *Decoder) SliceBool(s *[]bool) error { + err := dec.Array(DecodeArrayFunc(func(dec *Decoder) error { + var b bool + if err := dec.Bool(&b); err != nil { + return err + } + *s = append(*s, b) + return nil + })) + + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_sqlnull.go b/vendor/github.com/francoispqt/gojay/decode_sqlnull.go new file mode 100644 index 0000000..c25549f --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_sqlnull.go @@ -0,0 +1,157 @@ +package gojay + +import "database/sql" + +// DecodeSQLNullString decodes a sql.NullString +func (dec *Decoder) DecodeSQLNullString(v *sql.NullString) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeSQLNullString(v) +} + +func (dec *Decoder) decodeSQLNullString(v *sql.NullString) error { + var str string + if err := dec.decodeString(&str); err != nil { + return err + } + v.String = str + v.Valid = true + return nil +} + +// DecodeSQLNullInt64 decodes a sql.NullInt64 +func (dec *Decoder) DecodeSQLNullInt64(v *sql.NullInt64) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeSQLNullInt64(v) +} + +func (dec *Decoder) decodeSQLNullInt64(v *sql.NullInt64) 
error { + var i int64 + if err := dec.decodeInt64(&i); err != nil { + return err + } + v.Int64 = i + v.Valid = true + return nil +} + +// DecodeSQLNullFloat64 decodes a sql.NullFloat64 +func (dec *Decoder) DecodeSQLNullFloat64(v *sql.NullFloat64) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeSQLNullFloat64(v) +} + +func (dec *Decoder) decodeSQLNullFloat64(v *sql.NullFloat64) error { + var i float64 + if err := dec.decodeFloat64(&i); err != nil { + return err + } + v.Float64 = i + v.Valid = true + return nil +} + +// DecodeSQLNullBool decodes a sql.NullBool +func (dec *Decoder) DecodeSQLNullBool(v *sql.NullBool) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeSQLNullBool(v) +} + +func (dec *Decoder) decodeSQLNullBool(v *sql.NullBool) error { + var b bool + if err := dec.decodeBool(&b); err != nil { + return err + } + v.Bool = b + v.Valid = true + return nil +} + +// Add Values functions + +// AddSQLNullString decodes the JSON value within an object or an array to an *sql.NullString +func (dec *Decoder) AddSQLNullString(v *sql.NullString) error { + return dec.SQLNullString(v) +} + +// SQLNullString decodes the JSON value within an object or an array to an *sql.NullString +func (dec *Decoder) SQLNullString(v *sql.NullString) error { + var b *string + if err := dec.StringNull(&b); err != nil { + return err + } + if b == nil { + v.Valid = false + } else { + v.String = *b + v.Valid = true + } + return nil +} + +// AddSQLNullInt64 decodes the JSON value within an object or an array to an *sql.NullInt64 +func (dec *Decoder) AddSQLNullInt64(v *sql.NullInt64) error { + return dec.SQLNullInt64(v) +} + +// SQLNullInt64 decodes the JSON value within an object or an array to an *sql.NullInt64 +func (dec *Decoder) SQLNullInt64(v *sql.NullInt64) error { + var b *int64 + if err := dec.Int64Null(&b); err != nil { + return err + } + if b == nil { + v.Valid = false + } else { + v.Int64 = *b + v.Valid = true + } + return nil +} + +// AddSQLNullFloat64 decodes the JSON value within an object or an array to an *sql.NullFloat64 +func (dec *Decoder) AddSQLNullFloat64(v *sql.NullFloat64) error { + return dec.SQLNullFloat64(v) +} + +// SQLNullFloat64 decodes the JSON value within an object or an array to an *sql.NullFloat64 +func (dec *Decoder) SQLNullFloat64(v *sql.NullFloat64) error { + var b *float64 + if err := dec.Float64Null(&b); err != nil { + return err + } + if b == nil { + v.Valid = false + } else { + v.Float64 = *b + v.Valid = true + } + return nil +} + +// AddSQLNullBool decodes the JSON value within an object or an array to an *sql.NullBool +func (dec *Decoder) AddSQLNullBool(v *sql.NullBool) error { + return dec.SQLNullBool(v) +} + +// SQLNullBool decodes the JSON value within an object or an array to an *sql.NullBool +func (dec *Decoder) SQLNullBool(v *sql.NullBool) error { + var b *bool + if err := dec.BoolNull(&b); err != nil { + return err + } + if b == nil { + v.Valid = false + } else { + v.Bool = *b + v.Valid = true + } + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_stream.go b/vendor/github.com/francoispqt/gojay/decode_stream.go new file mode 100644 index 0000000..74beee4 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_stream.go @@ -0,0 +1,115 @@ +package gojay + +import ( + "sync" + "time" +) + +// UnmarshalerStream is the interface to implement 
for a slice, an array or a channel + to decode a line delimited JSON to. +type UnmarshalerStream interface { + UnmarshalStream(*StreamDecoder) error +} + +// Stream is a struct holding the Stream API +var Stream = stream{} + +type stream struct{} + +// A StreamDecoder reads and decodes JSON values from an input stream. +// +// It implements context.Context and provides a channel to notify interruption. +type StreamDecoder struct { + mux sync.RWMutex + *Decoder + done chan struct{} + deadline *time.Time +} + +// DecodeStream reads the next line delimited JSON-encoded value from the decoder's input (io.Reader) and stores it in the value pointed to by c. +// +// c must implement UnmarshalerStream. Ideally c is a channel. See example for implementation. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. +func (dec *StreamDecoder) DecodeStream(c UnmarshalerStream) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + if dec.r == nil { + dec.err = NoReaderError("No reader given to decode stream") + close(dec.done) + return dec.err + } + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + continue + default: + // char is not a space, start reading + for dec.nextChar() != 0 { + // calling unmarshal stream + err := c.UnmarshalStream(dec) + if err != nil { + dec.err = err + close(dec.done) + return err + } + // garbage collects buffer + // we don't want the buffer to grow extensively + dec.data = dec.data[dec.cursor:] + dec.length = dec.length - dec.cursor + dec.cursor = 0 + } + // close the done channel to signal the end of the job + close(dec.done) + return nil + } + } + close(dec.done) + dec.mux.Lock() + err := dec.raiseInvalidJSONErr(dec.cursor) + dec.mux.Unlock() + return err +} + +// context.Context implementation + +// Done returns a channel that's closed when work is done. +// It implements context.Context +func (dec *StreamDecoder) Done() <-chan struct{} { + return dec.done +} + +// Deadline returns the time when work done on behalf of this context +// should be canceled. Deadline returns ok==false when no deadline is +// set. Successive calls to Deadline return the same results. +func (dec *StreamDecoder) Deadline() (time.Time, bool) { + if dec.deadline != nil { + return *dec.deadline, true + } + return time.Time{}, false +} + +// SetDeadline sets the deadline +func (dec *StreamDecoder) SetDeadline(t time.Time) { + dec.deadline = &t +} + +// Err returns nil if Done is not yet closed. +// If Done is closed, Err returns a non-nil error explaining why. +// It implements context.Context +func (dec *StreamDecoder) Err() error { + select { + case <-dec.done: + dec.mux.RLock() + defer dec.mux.RUnlock() + return dec.err + default: + return nil + } +} + +// Value implements context.Context +func (dec *StreamDecoder) Value(key interface{}) interface{} { + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_stream_pool.go b/vendor/github.com/francoispqt/gojay/decode_stream_pool.go new file mode 100644 index 0000000..8e1863b --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_stream_pool.go @@ -0,0 +1,59 @@ +package gojay + +import ( + "io" + "sync" +) + +var streamDecPool = sync.Pool{ + New: newStreamDecoderPool, +} + +// NewDecoder returns a new StreamDecoder. +// It takes an io.Reader implementation as data input. +// It initiates the done channel returned by Done(). 
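+//
+// Caller-side sketch of stream decoding (the msgChan type is an assumption; it
+// implements UnmarshalerStream by decoding one value per call and sending it
+// on the channel):
+//
+//    dec := gojay.Stream.NewDecoder(r)
+//    go func() { _ = dec.DecodeStream(msgChan(c)) }()
+//    <-dec.Done()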
+func (s stream) NewDecoder(r io.Reader) *StreamDecoder { + dec := NewDecoder(r) + streamDec := &StreamDecoder{ + Decoder: dec, + done: make(chan struct{}, 1), + mux: sync.RWMutex{}, + } + return streamDec +} +func newStreamDecoderPool() interface{} { + return Stream.NewDecoder(nil) +} + +// BorrowDecoder borrows a StreamDecoder from the pool. +// It takes an io.Reader implementation as data input. +// It initiates the done channel returned by Done(). +// +// If no StreamEncoder is available in the pool, it returns a fresh one +func (s stream) BorrowDecoder(r io.Reader) *StreamDecoder { + return s.borrowDecoder(r, 512) +} + +func (s stream) borrowDecoder(r io.Reader, bufSize int) *StreamDecoder { + streamDec := streamDecPool.Get().(*StreamDecoder) + streamDec.called = 0 + streamDec.keysDone = 0 + streamDec.cursor = 0 + streamDec.err = nil + streamDec.r = r + streamDec.length = 0 + streamDec.isPooled = 0 + streamDec.done = make(chan struct{}, 1) + if bufSize > 0 { + streamDec.data = make([]byte, bufSize) + } + return streamDec +} + +// Release sends back a Decoder to the pool. +// If a decoder is used after calling Release +// a panic will be raised with an InvalidUsagePooledDecoderError error. +func (dec *StreamDecoder) Release() { + dec.isPooled = 1 + streamDecPool.Put(dec) +} diff --git a/vendor/github.com/francoispqt/gojay/decode_string.go b/vendor/github.com/francoispqt/gojay/decode_string.go new file mode 100644 index 0000000..694359c --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_string.go @@ -0,0 +1,260 @@ +package gojay + +import ( + "unsafe" +) + +// DecodeString reads the next JSON-encoded value from the decoder's input (io.Reader) and stores it in the string pointed to by v. +// +// See the documentation for Unmarshal for details about the conversion of JSON into a Go value. 
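As a quick usage note, a decoder borrowed from the pool must be released after use and never touched again; the sketch below shows the borrow/decode/release cycle for a single string (the input literal is an assumption for illustration):

// Sketch only; assumes: import ( "strings" ; "github.com/francoispqt/gojay" )
func decodeGreeting() (string, error) {
	dec := gojay.BorrowDecoder(strings.NewReader(`"hello"`))
	defer dec.Release() // using dec after Release panics with InvalidUsagePooledDecoderError
	var s string
	err := dec.DecodeString(&s)
	return s, err // "hello", nil
}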
+func (dec *Decoder) DecodeString(v *string) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeString(v) +} +func (dec *Decoder) decodeString(v *string) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + // is string + continue + case '"': + dec.cursor++ + start, end, err := dec.getString() + if err != nil { + return err + } + // we do minus one to remove the last quote + d := dec.data[start : end-1] + *v = *(*string)(unsafe.Pointer(&d)) + dec.cursor = end + return nil + // is nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return nil +} + +func (dec *Decoder) decodeStringNull(v **string) error { + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + switch dec.data[dec.cursor] { + case ' ', '\n', '\t', '\r', ',': + // is string + continue + case '"': + dec.cursor++ + start, end, err := dec.getString() + + if err != nil { + return err + } + if *v == nil { + *v = new(string) + } + // we do minus one to remove the last quote + d := dec.data[start : end-1] + **v = *(*string)(unsafe.Pointer(&d)) + dec.cursor = end + return nil + // is nil + case 'n': + dec.cursor++ + err := dec.assertNull() + if err != nil { + return err + } + return nil + default: + dec.err = dec.makeInvalidUnmarshalErr(v) + err := dec.skipData() + if err != nil { + return err + } + return nil + } + } + return nil +} + +func (dec *Decoder) parseEscapedString() error { + if dec.cursor >= dec.length && !dec.read() { + return dec.raiseInvalidJSONErr(dec.cursor) + } + switch dec.data[dec.cursor] { + case '"': + dec.data[dec.cursor] = '"' + case '\\': + dec.data[dec.cursor] = '\\' + case '/': + dec.data[dec.cursor] = '/' + case 'b': + dec.data[dec.cursor] = '\b' + case 'f': + dec.data[dec.cursor] = '\f' + case 'n': + dec.data[dec.cursor] = '\n' + case 'r': + dec.data[dec.cursor] = '\r' + case 't': + dec.data[dec.cursor] = '\t' + case 'u': + start := dec.cursor + dec.cursor++ + str, err := dec.parseUnicode() + if err != nil { + return err + } + diff := dec.cursor - start + dec.data = append(append(dec.data[:start-1], str...), dec.data[dec.cursor:]...) + dec.length = len(dec.data) + dec.cursor += len(str) - diff - 1 + + return nil + default: + return dec.raiseInvalidJSONErr(dec.cursor) + } + + dec.data = append(dec.data[:dec.cursor-1], dec.data[dec.cursor:]...) + dec.length-- + + // Since we've lost a character, our dec.cursor offset is now + // 1 past the escaped character which is precisely where we + // want it. 
+ + return nil +} + +func (dec *Decoder) getString() (int, int, error) { + // extract key + var keyStart = dec.cursor + // var str *Builder + for dec.cursor < dec.length || dec.read() { + switch dec.data[dec.cursor] { + // string found + case '"': + dec.cursor = dec.cursor + 1 + return keyStart, dec.cursor, nil + // slash found + case '\\': + dec.cursor = dec.cursor + 1 + err := dec.parseEscapedString() + if err != nil { + return 0, 0, err + } + default: + dec.cursor = dec.cursor + 1 + continue + } + } + return 0, 0, dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) skipEscapedString() error { + start := dec.cursor + for ; dec.cursor < dec.length || dec.read(); dec.cursor++ { + if dec.data[dec.cursor] != '\\' { + d := dec.data[dec.cursor] + dec.cursor = dec.cursor + 1 + nSlash := dec.cursor - start + switch d { + case '"': + // nSlash must be odd + if nSlash&1 != 1 { + return dec.raiseInvalidJSONErr(dec.cursor) + } + return nil + case 'u': // is unicode, we skip the following characters and place the cursor one one byte backward to avoid it breaking when returning to skipString + if err := dec.skipString(); err != nil { + return err + } + dec.cursor-- + return nil + case 'n', 'r', 't', '/', 'f', 'b': + return nil + default: + // nSlash must be even + if nSlash&1 == 1 { + return dec.raiseInvalidJSONErr(dec.cursor) + } + return nil + } + } + } + return dec.raiseInvalidJSONErr(dec.cursor) +} + +func (dec *Decoder) skipString() error { + for dec.cursor < dec.length || dec.read() { + switch dec.data[dec.cursor] { + // found the closing quote + // let's return + case '"': + dec.cursor = dec.cursor + 1 + return nil + // solidus found start parsing an escaped string + case '\\': + dec.cursor = dec.cursor + 1 + err := dec.skipEscapedString() + if err != nil { + return err + } + default: + dec.cursor = dec.cursor + 1 + continue + } + } + return dec.raiseInvalidJSONErr(len(dec.data) - 1) +} + +// Add Values functions + +// AddString decodes the JSON value within an object or an array to a *string. +// If next key is not a JSON string nor null, InvalidUnmarshalError will be returned. +func (dec *Decoder) AddString(v *string) error { + return dec.String(v) +} + +// AddStringNull decodes the JSON value within an object or an array to a *string. +// If next key is not a JSON string nor null, InvalidUnmarshalError will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. +func (dec *Decoder) AddStringNull(v **string) error { + return dec.StringNull(v) +} + +// String decodes the JSON value within an object or an array to a *string. +// If next key is not a JSON string nor null, InvalidUnmarshalError will be returned. +func (dec *Decoder) String(v *string) error { + err := dec.decodeString(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} + +// StringNull decodes the JSON value within an object or an array to a **string. +// If next key is not a JSON string nor null, InvalidUnmarshalError will be returned. +// If a `null` is encountered, gojay does not change the value of the pointer. 
+func (dec *Decoder) StringNull(v **string) error { + err := dec.decodeStringNull(v) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_string_unicode.go b/vendor/github.com/francoispqt/gojay/decode_string_unicode.go new file mode 100644 index 0000000..9e14d52 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_string_unicode.go @@ -0,0 +1,98 @@ +package gojay + +import ( + "unicode/utf16" + "unicode/utf8" +) + +func (dec *Decoder) getUnicode() (rune, error) { + i := 0 + r := rune(0) + for ; (dec.cursor < dec.length || dec.read()) && i < 4; dec.cursor++ { + c := dec.data[dec.cursor] + if c >= '0' && c <= '9' { + r = r*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + r = r*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + r = r*16 + rune(c-'A'+10) + } else { + return 0, InvalidJSONError("Invalid unicode code point") + } + i++ + } + return r, nil +} + +func (dec *Decoder) appendEscapeChar(str []byte, c byte) ([]byte, error) { + switch c { + case 't': + str = append(str, '\t') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case '\\': + str = append(str, '\\') + default: + return nil, InvalidJSONError("Invalid JSON") + } + return str, nil +} + +func (dec *Decoder) parseUnicode() ([]byte, error) { + // get unicode after u + r, err := dec.getUnicode() + if err != nil { + return nil, err + } + // no error start making new string + str := make([]byte, 16, 16) + i := 0 + // check if code can be a surrogate utf16 + if utf16.IsSurrogate(r) { + if dec.cursor >= dec.length && !dec.read() { + return nil, dec.raiseInvalidJSONErr(dec.cursor) + } + c := dec.data[dec.cursor] + if c != '\\' { + i += utf8.EncodeRune(str, r) + return str[:i], nil + } + dec.cursor++ + if dec.cursor >= dec.length && !dec.read() { + return nil, dec.raiseInvalidJSONErr(dec.cursor) + } + c = dec.data[dec.cursor] + if c != 'u' { + i += utf8.EncodeRune(str, r) + str, err = dec.appendEscapeChar(str[:i], c) + if err != nil { + dec.err = err + return nil, err + } + i++ + dec.cursor++ + return str[:i], nil + } + dec.cursor++ + r2, err := dec.getUnicode() + if err != nil { + return nil, err + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + i += utf8.EncodeRune(str, r) + i += utf8.EncodeRune(str, r2) + } else { + i += utf8.EncodeRune(str, combined) + } + return str[:i], nil + } + i += utf8.EncodeRune(str, r) + return str[:i], nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_time.go b/vendor/github.com/francoispqt/gojay/decode_time.go new file mode 100644 index 0000000..68f906d --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_time.go @@ -0,0 +1,53 @@ +package gojay + +import ( + "time" +) + +// DecodeTime decodes time with the given format +func (dec *Decoder) DecodeTime(v *time.Time, format string) error { + if dec.isPooled == 1 { + panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder")) + } + return dec.decodeTime(v, format) +} + +func (dec *Decoder) decodeTime(v *time.Time, format string) error { + if format == time.RFC3339 { + var ej = make(EmbeddedJSON, 0, 20) + if err := dec.decodeEmbeddedJSON(&ej); err != nil { + return err + } + if err := v.UnmarshalJSON(ej); err != nil { + return err + } + return nil + } + var str string + if err := dec.decodeString(&str); err != nil { + return err + } + tt, err := time.Parse(format, str) + if err != nil { + return err + } + *v = tt 
+ return nil +} + +// Add Values functions + +// AddTime decodes the JSON value within an object or an array to a *time.Time with the given format +func (dec *Decoder) AddTime(v *time.Time, format string) error { + return dec.Time(v, format) +} + +// Time decodes the JSON value within an object or an array to a *time.Time with the given format +func (dec *Decoder) Time(v *time.Time, format string) error { + err := dec.decodeTime(v, format) + if err != nil { + return err + } + dec.called |= 1 + return nil +} diff --git a/vendor/github.com/francoispqt/gojay/decode_unsafe.go b/vendor/github.com/francoispqt/gojay/decode_unsafe.go new file mode 100644 index 0000000..54448fb --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/decode_unsafe.go @@ -0,0 +1,120 @@ +package gojay + +import ( + "fmt" +) + +// Unsafe is the structure holding the unsafe version of the API. +// The difference between unsafe api and regular api is that the regular API +// copies the buffer passed to Unmarshal functions to a new internal buffer. +// Making it safer because internally GoJay uses unsafe.Pointer to transform slice of bytes into a string. +var Unsafe = decUnsafe{} + +type decUnsafe struct{} + +func (u decUnsafe) UnmarshalJSONArray(data []byte, v UnmarshalerJSONArray) error { + dec := borrowDecoder(nil, 0) + defer dec.Release() + dec.data = data + dec.length = len(data) + _, err := dec.decodeArray(v) + return err +} + +func (u decUnsafe) UnmarshalJSONObject(data []byte, v UnmarshalerJSONObject) error { + dec := borrowDecoder(nil, 0) + defer dec.Release() + dec.data = data + dec.length = len(data) + _, err := dec.decodeObject(v) + return err +} + +func (u decUnsafe) Unmarshal(data []byte, v interface{}) error { + var err error + var dec *Decoder + switch vt := v.(type) { + case *string: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeString(vt) + case *int: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt(vt) + case *int8: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt8(vt) + case *int16: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt16(vt) + case *int32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt32(vt) + case *int64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeInt64(vt) + case *uint8: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint8(vt) + case *uint16: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint16(vt) + case *uint32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint32(vt) + case *uint64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeUint64(vt) + case *float64: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeFloat64(vt) + case *float32: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeFloat32(vt) + case *bool: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + err = dec.decodeBool(vt) + case UnmarshalerJSONObject: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + _, err = dec.decodeObject(vt) + case UnmarshalerJSONArray: + dec = borrowDecoder(nil, 0) + dec.length = len(data) + dec.data = data + _, err = 
dec.decodeArray(vt) + default: + return InvalidUnmarshalError(fmt.Sprintf(invalidUnmarshalErrorMsg, vt)) + } + defer dec.Release() + if err != nil { + return err + } + return dec.err +} diff --git a/vendor/github.com/francoispqt/gojay/encode.go b/vendor/github.com/francoispqt/gojay/encode.go new file mode 100644 index 0000000..92edaaf --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode.go @@ -0,0 +1,202 @@ +package gojay + +import ( + "encoding/json" + "fmt" + "io" +) + +var nullBytes = []byte("null") + +// MarshalJSONArray returns the JSON encoding of v, an implementation of MarshalerJSONArray. +// +// +// Example: +// type TestSlice []*TestStruct +// +// func (t TestSlice) MarshalJSONArray(enc *Encoder) { +// for _, e := range t { +// enc.AddObject(e) +// } +// } +// +// func main() { +// test := &TestSlice{ +// &TestStruct{123456}, +// &TestStruct{7890}, +// } +// b, _ := Marshal(test) +// fmt.Println(b) // [{"id":123456},{"id":7890}] +// } +func MarshalJSONArray(v MarshalerJSONArray) ([]byte, error) { + enc := BorrowEncoder(nil) + enc.grow(512) + enc.writeByte('[') + v.(MarshalerJSONArray).MarshalJSONArray(enc) + enc.writeByte(']') + + defer func() { + enc.buf = make([]byte, 0, 512) + enc.Release() + }() + + return enc.buf, nil +} + +// MarshalJSONObject returns the JSON encoding of v, an implementation of MarshalerJSONObject. +// +// Example: +// type Object struct { +// id int +// } +// func (s *Object) MarshalJSONObject(enc *gojay.Encoder) { +// enc.IntKey("id", s.id) +// } +// func (s *Object) IsNil() bool { +// return s == nil +// } +// +// func main() { +// test := &Object{ +// id: 123456, +// } +// b, _ := gojay.Marshal(test) +// fmt.Println(b) // {"id":123456} +// } +func MarshalJSONObject(v MarshalerJSONObject) ([]byte, error) { + enc := BorrowEncoder(nil) + enc.grow(512) + + defer func() { + enc.buf = make([]byte, 0, 512) + enc.Release() + }() + + return enc.encodeObject(v) +} + +// Marshal returns the JSON encoding of v. +// +// If v is nil, not an implementation MarshalerJSONObject or MarshalerJSONArray or not one of the following types: +// string, int, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float64, float32, bool +// Marshal returns an InvalidMarshalError. +func Marshal(v interface{}) ([]byte, error) { + return marshal(v, false) +} + +// MarshalAny returns the JSON encoding of v. +// +// If v is nil, not an implementation MarshalerJSONObject or MarshalerJSONArray or not one of the following types: +// string, int, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float64, float32, bool +// MarshalAny falls back to "json/encoding" package to marshal the value. 
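In practice the distinction is that Marshal rejects types it does not know, while MarshalAny defers to encoding/json for them. A small illustrative sketch (the pair type is assumed):

// Sketch only; assumes: import ( "fmt" ; "github.com/francoispqt/gojay" )
func marshalExamples() {
	type pair struct{ A, B int }
	b, err := gojay.MarshalAny(pair{A: 1, B: 2})
	fmt.Println(string(b), err) // {"A":1,"B":2} <nil>  (encoding/json fallback)
	_, err = gojay.Marshal(pair{A: 1, B: 2})
	fmt.Println(err != nil) // true: pair implements neither MarshalerJSONObject nor MarshalerJSONArray
}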
+func MarshalAny(v interface{}) ([]byte, error) { + return marshal(v, true) +} + +func marshal(v interface{}, any bool) ([]byte, error) { + var ( + enc = BorrowEncoder(nil) + + buf []byte + err error + ) + + defer func() { + enc.buf = make([]byte, 0, 512) + enc.Release() + }() + + buf, err = func() ([]byte, error) { + switch vt := v.(type) { + case MarshalerJSONObject: + return enc.encodeObject(vt) + case MarshalerJSONArray: + return enc.encodeArray(vt) + case string: + return enc.encodeString(vt) + case bool: + return enc.encodeBool(vt) + case int: + return enc.encodeInt(vt) + case int64: + return enc.encodeInt64(vt) + case int32: + return enc.encodeInt(int(vt)) + case int16: + return enc.encodeInt(int(vt)) + case int8: + return enc.encodeInt(int(vt)) + case uint64: + return enc.encodeInt(int(vt)) + case uint32: + return enc.encodeInt(int(vt)) + case uint16: + return enc.encodeInt(int(vt)) + case uint8: + return enc.encodeInt(int(vt)) + case float64: + return enc.encodeFloat(vt) + case float32: + return enc.encodeFloat32(vt) + case *EmbeddedJSON: + return enc.encodeEmbeddedJSON(vt) + default: + if any { + return json.Marshal(vt) + } + + return nil, InvalidMarshalError(fmt.Sprintf(invalidMarshalErrorMsg, vt)) + } + }() + return buf, err +} + +// MarshalerJSONObject is the interface to implement for struct to be encoded +type MarshalerJSONObject interface { + MarshalJSONObject(enc *Encoder) + IsNil() bool +} + +// MarshalerJSONArray is the interface to implement +// for a slice or an array to be encoded +type MarshalerJSONArray interface { + MarshalJSONArray(enc *Encoder) + IsNil() bool +} + +// An Encoder writes JSON values to an output stream. +type Encoder struct { + buf []byte + isPooled byte + w io.Writer + err error + hasKeys bool + keys []string +} + +// AppendBytes allows a modular usage by appending bytes manually to the current state of the buffer. +func (enc *Encoder) AppendBytes(b []byte) { + enc.writeBytes(b) +} + +// AppendByte allows a modular usage by appending a single byte manually to the current state of the buffer. +func (enc *Encoder) AppendByte(b byte) { + enc.writeByte(b) +} + +// Buf returns the Encoder's buffer. +func (enc *Encoder) Buf() []byte { + return enc.buf +} + +// Write writes to the io.Writer and resets the buffer. 
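Write is usually invoked indirectly by the Encode* helpers once a value has been buffered; a minimal sketch of encoding straight into an io.Writer with a pooled encoder (the buffer and helper name are illustrative):

// Sketch only; assumes: import ( "bytes" ; "github.com/francoispqt/gojay" )
func encodeGreeting() (string, error) {
	var buf bytes.Buffer
	enc := gojay.BorrowEncoder(&buf)
	defer enc.Release()
	// EncodeString builds JSON in the encoder's buffer, then flushes it to buf via Write.
	if err := enc.EncodeString("hello"); err != nil {
		return "", err
	}
	return buf.String(), nil // `"hello"` (quoted)
}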
+func (enc *Encoder) Write() (int, error) { + i, err := enc.w.Write(enc.buf) + enc.buf = enc.buf[:0] + return i, err +} + +func (enc *Encoder) getPreviousRune() byte { + last := len(enc.buf) - 1 + return enc.buf[last] +} diff --git a/vendor/github.com/francoispqt/gojay/encode_array.go b/vendor/github.com/francoispqt/gojay/encode_array.go new file mode 100644 index 0000000..5e9d49e --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_array.go @@ -0,0 +1,212 @@ +package gojay + +// EncodeArray encodes an implementation of MarshalerJSONArray to JSON +func (enc *Encoder) EncodeArray(v MarshalerJSONArray) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeArray(v) + _, err := enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} +func (enc *Encoder) encodeArray(v MarshalerJSONArray) ([]byte, error) { + enc.grow(200) + enc.writeByte('[') + v.MarshalJSONArray(enc) + enc.writeByte(']') + return enc.buf, enc.err +} + +// AddArray adds an implementation of MarshalerJSONArray to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement Marshaler +func (enc *Encoder) AddArray(v MarshalerJSONArray) { + enc.Array(v) +} + +// AddArrayOmitEmpty adds an array or slice to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerAddArrayOmitEmpty +func (enc *Encoder) AddArrayOmitEmpty(v MarshalerJSONArray) { + enc.ArrayOmitEmpty(v) +} + +// AddArrayNullEmpty adds an array or slice to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement Marshaler, if v is empty, `null` will be encoded` +func (enc *Encoder) AddArrayNullEmpty(v MarshalerJSONArray) { + enc.ArrayNullEmpty(v) +} + +// AddArrayKey adds an array or slice to be encoded, must be used inside an object as it will encode a key +// value must implement Marshaler +func (enc *Encoder) AddArrayKey(key string, v MarshalerJSONArray) { + enc.ArrayKey(key, v) +} + +// AddArrayKeyOmitEmpty adds an array or slice to be encoded and skips it if it is nil. +// Must be called inside an object as it will encode a key. +func (enc *Encoder) AddArrayKeyOmitEmpty(key string, v MarshalerJSONArray) { + enc.ArrayKeyOmitEmpty(key, v) +} + +// AddArrayKeyNullEmpty adds an array or slice to be encoded and skips it if it is nil. +// Must be called inside an object as it will encode a key. 
`null` will be encoded` +func (enc *Encoder) AddArrayKeyNullEmpty(key string, v MarshalerJSONArray) { + enc.ArrayKeyNullEmpty(key, v) +} + +// Array adds an implementation of MarshalerJSONArray to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement Marshaler +func (enc *Encoder) Array(v MarshalerJSONArray) { + if v.IsNil() { + enc.grow(3) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeByte('[') + enc.writeByte(']') + return + } + enc.grow(100) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeByte('[') + v.MarshalJSONArray(enc) + enc.writeByte(']') +} + +// ArrayOmitEmpty adds an array or slice to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement Marshaler +func (enc *Encoder) ArrayOmitEmpty(v MarshalerJSONArray) { + if v.IsNil() { + return + } + enc.grow(4) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeByte('[') + v.MarshalJSONArray(enc) + enc.writeByte(']') +} + +// ArrayNullEmpty adds an array or slice to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement Marshaler +func (enc *Encoder) ArrayNullEmpty(v MarshalerJSONArray) { + enc.grow(4) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v.IsNil() { + enc.writeBytes(nullBytes) + return + } + enc.writeByte('[') + v.MarshalJSONArray(enc) + enc.writeByte(']') +} + +// ArrayKey adds an array or slice to be encoded, must be used inside an object as it will encode a key +// value must implement Marshaler +func (enc *Encoder) ArrayKey(key string, v MarshalerJSONArray) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v.IsNil() { + enc.grow(2 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyArr) + enc.writeByte(']') + return + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyArr) + v.MarshalJSONArray(enc) + enc.writeByte(']') +} + +// ArrayKeyOmitEmpty adds an array or slice to be encoded and skips if it is nil. +// Must be called inside an object as it will encode a key. +func (enc *Encoder) ArrayKeyOmitEmpty(key string, v MarshalerJSONArray) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v.IsNil() { + return + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyArr) + v.MarshalJSONArray(enc) + enc.writeByte(']') +} + +// ArrayKeyNullEmpty adds an array or slice to be encoded and encodes `null`` if it is nil. +// Must be called inside an object as it will encode a key. +func (enc *Encoder) ArrayKeyNullEmpty(key string, v MarshalerJSONArray) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + if v.IsNil() { + enc.writeBytes(nullBytes) + return + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyArr) + v.MarshalJSONArray(enc) + enc.writeByte(']') +} + +// EncodeArrayFunc is a custom func type implementing MarshaleArray. +// Use it to cast a func(*Encoder) to Marshal an object. 
+// +// enc := gojay.NewEncoder(io.Writer) +// enc.EncodeArray(gojay.EncodeArrayFunc(func(enc *gojay.Encoder) { +// enc.AddStringKey("hello", "world") +// })) +type EncodeArrayFunc func(*Encoder) + +// MarshalJSONArray implements MarshalerJSONArray. +func (f EncodeArrayFunc) MarshalJSONArray(enc *Encoder) { + f(enc) +} + +// IsNil implements MarshalerJSONArray. +func (f EncodeArrayFunc) IsNil() bool { + return f == nil +} diff --git a/vendor/github.com/francoispqt/gojay/encode_bool.go b/vendor/github.com/francoispqt/gojay/encode_bool.go new file mode 100644 index 0000000..253e037 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_bool.go @@ -0,0 +1,164 @@ +package gojay + +import "strconv" + +// EncodeBool encodes a bool to JSON +func (enc *Encoder) EncodeBool(v bool) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeBool(v) + _, err := enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +// encodeBool encodes a bool to JSON +func (enc *Encoder) encodeBool(v bool) ([]byte, error) { + enc.grow(5) + if v { + enc.writeString("true") + } else { + enc.writeString("false") + } + return enc.buf, enc.err +} + +// AddBool adds a bool to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddBool(v bool) { + enc.Bool(v) +} + +// AddBoolOmitEmpty adds a bool to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddBoolOmitEmpty(v bool) { + enc.BoolOmitEmpty(v) +} + +// AddBoolNullEmpty adds a bool to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddBoolNullEmpty(v bool) { + enc.BoolNullEmpty(v) +} + +// AddBoolKey adds a bool to be encoded, must be used inside an object as it will encode a key. +func (enc *Encoder) AddBoolKey(key string, v bool) { + enc.BoolKey(key, v) +} + +// AddBoolKeyOmitEmpty adds a bool to be encoded and skips if it is zero value. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddBoolKeyOmitEmpty(key string, v bool) { + enc.BoolKeyOmitEmpty(key, v) +} + +// AddBoolKeyNullEmpty adds a bool to be encoded and encodes `null` if it is zero value. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddBoolKeyNullEmpty(key string, v bool) { + enc.BoolKeyNullEmpty(key, v) +} + +// Bool adds a bool to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Bool(v bool) { + enc.grow(5) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v { + enc.writeString("true") + } else { + enc.writeString("false") + } +} + +// BoolOmitEmpty adds a bool to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) BoolOmitEmpty(v bool) { + if v == false { + return + } + enc.grow(5) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeString("true") +} + +// BoolNullEmpty adds a bool to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) BoolNullEmpty(v bool) { + enc.grow(5) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v == false { + enc.writeBytes(nullBytes) + return + } + enc.writeString("true") +} + +// BoolKey adds a bool to be encoded, must be used inside an object as it will encode a key. 
+func (enc *Encoder) BoolKey(key string, value bool) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendBool(enc.buf, value) +} + +// BoolKeyOmitEmpty adds a bool to be encoded and skips it if it is zero value. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) BoolKeyOmitEmpty(key string, v bool) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == false { + return + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendBool(enc.buf, v) +} + +// BoolKeyNullEmpty adds a bool to be encoded and skips it if it is zero value. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) BoolKeyNullEmpty(key string, v bool) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v == false { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendBool(enc.buf, v) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_builder.go b/vendor/github.com/francoispqt/gojay/encode_builder.go new file mode 100644 index 0000000..2895ba3 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_builder.go @@ -0,0 +1,65 @@ +package gojay + +const hex = "0123456789abcdef" + +// grow grows b's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to b +// without another allocation. If n is negative, grow panics. +func (enc *Encoder) grow(n int) { + if cap(enc.buf)-len(enc.buf) < n { + Buf := make([]byte, len(enc.buf), 2*cap(enc.buf)+n) + copy(Buf, enc.buf) + enc.buf = Buf + } +} + +// Write appends the contents of p to b's Buffer. +// Write always returns len(p), nil. +func (enc *Encoder) writeBytes(p []byte) { + enc.buf = append(enc.buf, p...) +} + +func (enc *Encoder) writeTwoBytes(b1 byte, b2 byte) { + enc.buf = append(enc.buf, b1, b2) +} + +// WriteByte appends the byte c to b's Buffer. +// The returned error is always nil. +func (enc *Encoder) writeByte(c byte) { + enc.buf = append(enc.buf, c) +} + +// WriteString appends the contents of s to b's Buffer. +// It returns the length of s and a nil error. +func (enc *Encoder) writeString(s string) { + enc.buf = append(enc.buf, s...) 
+} + +func (enc *Encoder) writeStringEscape(s string) { + l := len(s) + for i := 0; i < l; i++ { + c := s[i] + if c >= 0x20 && c != '\\' && c != '"' { + enc.writeByte(c) + continue + } + switch c { + case '\\', '"': + enc.writeTwoBytes('\\', c) + case '\n': + enc.writeTwoBytes('\\', 'n') + case '\f': + enc.writeTwoBytes('\\', 'f') + case '\b': + enc.writeTwoBytes('\\', 'b') + case '\r': + enc.writeTwoBytes('\\', 'r') + case '\t': + enc.writeTwoBytes('\\', 't') + default: + enc.writeString(`\u00`) + enc.writeTwoBytes(hex[c>>4], hex[c&0xF]) + } + continue + } +} diff --git a/vendor/github.com/francoispqt/gojay/encode_embedded_json.go b/vendor/github.com/francoispqt/gojay/encode_embedded_json.go new file mode 100644 index 0000000..4c99a05 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_embedded_json.go @@ -0,0 +1,93 @@ +package gojay + +// EncodeEmbeddedJSON encodes an embedded JSON. +// is basically sets the internal buf as the value pointed by v and calls the io.Writer.Write() +func (enc *Encoder) EncodeEmbeddedJSON(v *EmbeddedJSON) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + enc.buf = *v + _, err := enc.Write() + if err != nil { + return err + } + return nil +} + +func (enc *Encoder) encodeEmbeddedJSON(v *EmbeddedJSON) ([]byte, error) { + enc.writeBytes(*v) + return enc.buf, nil +} + +// AddEmbeddedJSON adds an EmbeddedJSON to be encoded. +// +// It basically blindly writes the bytes to the final buffer. Therefore, +// it expects the JSON to be of proper format. +func (enc *Encoder) AddEmbeddedJSON(v *EmbeddedJSON) { + enc.grow(len(*v) + 4) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeBytes(*v) +} + +// AddEmbeddedJSONOmitEmpty adds an EmbeddedJSON to be encoded or skips it if nil pointer or empty. +// +// It basically blindly writes the bytes to the final buffer. Therefore, +// it expects the JSON to be of proper format. +func (enc *Encoder) AddEmbeddedJSONOmitEmpty(v *EmbeddedJSON) { + if v == nil || len(*v) == 0 { + return + } + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeBytes(*v) +} + +// AddEmbeddedJSONKey adds an EmbeddedJSON and a key to be encoded. +// +// It basically blindly writes the bytes to the final buffer. Therefore, +// it expects the JSON to be of proper format. +func (enc *Encoder) AddEmbeddedJSONKey(key string, v *EmbeddedJSON) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(len(key) + len(*v) + 5) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.writeBytes(*v) +} + +// AddEmbeddedJSONKeyOmitEmpty adds an EmbeddedJSON and a key to be encoded or skips it if nil pointer or empty. +// +// It basically blindly writes the bytes to the final buffer. Therefore, +// it expects the JSON to be of proper format. 
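A short sketch of how the embedded-JSON key helpers are typically called from a MarshalerJSONObject implementation; the payload type is illustrative, and the raw bytes must already be valid JSON since they are copied verbatim:

// Sketch only; assumes: import "github.com/francoispqt/gojay"
type payload struct {
	Raw gojay.EmbeddedJSON // must already hold valid JSON, e.g. gojay.EmbeddedJSON(`{"a":1}`)
}

func (p *payload) MarshalJSONObject(enc *gojay.Encoder) {
	enc.AddEmbeddedJSONKey("raw", &p.Raw) // bytes are written verbatim under the "raw" key
}

func (p *payload) IsNil() bool { return p == nil }

// gojay.MarshalJSONObject(&payload{Raw: gojay.EmbeddedJSON(`{"a":1}`)}) -> {"raw":{"a":1}}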
+func (enc *Encoder) AddEmbeddedJSONKeyOmitEmpty(key string, v *EmbeddedJSON) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == nil || len(*v) == 0 { + return + } + enc.grow(len(key) + len(*v) + 5) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.writeBytes(*v) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_interface.go b/vendor/github.com/francoispqt/gojay/encode_interface.go new file mode 100644 index 0000000..c4692e5 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_interface.go @@ -0,0 +1,173 @@ +package gojay + +import ( + "fmt" +) + +// Encode encodes a value to JSON. +// +// If Encode cannot find a way to encode the type to JSON +// it will return an InvalidMarshalError. +func (enc *Encoder) Encode(v interface{}) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + switch vt := v.(type) { + case string: + return enc.EncodeString(vt) + case bool: + return enc.EncodeBool(vt) + case MarshalerJSONArray: + return enc.EncodeArray(vt) + case MarshalerJSONObject: + return enc.EncodeObject(vt) + case int: + return enc.EncodeInt(vt) + case int64: + return enc.EncodeInt64(vt) + case int32: + return enc.EncodeInt(int(vt)) + case int8: + return enc.EncodeInt(int(vt)) + case uint64: + return enc.EncodeUint64(vt) + case uint32: + return enc.EncodeInt(int(vt)) + case uint16: + return enc.EncodeInt(int(vt)) + case uint8: + return enc.EncodeInt(int(vt)) + case float64: + return enc.EncodeFloat(vt) + case float32: + return enc.EncodeFloat32(vt) + case *EmbeddedJSON: + return enc.EncodeEmbeddedJSON(vt) + default: + return InvalidMarshalError(fmt.Sprintf(invalidMarshalErrorMsg, vt)) + } +} + +// AddInterface adds an interface{} to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddInterface(value interface{}) { + switch vt := value.(type) { + case string: + enc.AddString(vt) + case bool: + enc.AddBool(vt) + case MarshalerJSONArray: + enc.AddArray(vt) + case MarshalerJSONObject: + enc.AddObject(vt) + case int: + enc.AddInt(vt) + case int64: + enc.AddInt(int(vt)) + case int32: + enc.AddInt(int(vt)) + case int8: + enc.AddInt(int(vt)) + case uint64: + enc.AddUint64(vt) + case uint32: + enc.AddInt(int(vt)) + case uint16: + enc.AddInt(int(vt)) + case uint8: + enc.AddInt(int(vt)) + case float64: + enc.AddFloat(vt) + case float32: + enc.AddFloat32(vt) + default: + if vt != nil { + enc.err = InvalidMarshalError(fmt.Sprintf(invalidMarshalErrorMsg, vt)) + return + } + return + } +} + +// AddInterfaceKey adds an interface{} to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddInterfaceKey(key string, value interface{}) { + switch vt := value.(type) { + case string: + enc.AddStringKey(key, vt) + case bool: + enc.AddBoolKey(key, vt) + case MarshalerJSONArray: + enc.AddArrayKey(key, vt) + case MarshalerJSONObject: + enc.AddObjectKey(key, vt) + case int: + enc.AddIntKey(key, vt) + case int64: + enc.AddIntKey(key, int(vt)) + case int32: + enc.AddIntKey(key, int(vt)) + case int16: + enc.AddIntKey(key, int(vt)) + case int8: + enc.AddIntKey(key, int(vt)) + case uint64: + enc.AddIntKey(key, int(vt)) + case uint32: + enc.AddIntKey(key, int(vt)) + case uint16: + enc.AddIntKey(key, int(vt)) + case uint8: + enc.AddIntKey(key, int(vt)) + case float64: + enc.AddFloatKey(key, vt) + case float32: + enc.AddFloat32Key(key, vt) + 
default: + if vt != nil { + enc.err = InvalidMarshalError(fmt.Sprintf(invalidMarshalErrorMsg, vt)) + return + } + return + } +} + +// AddInterfaceKeyOmitEmpty adds an interface{} to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddInterfaceKeyOmitEmpty(key string, v interface{}) { + switch vt := v.(type) { + case string: + enc.AddStringKeyOmitEmpty(key, vt) + case bool: + enc.AddBoolKeyOmitEmpty(key, vt) + case MarshalerJSONArray: + enc.AddArrayKeyOmitEmpty(key, vt) + case MarshalerJSONObject: + enc.AddObjectKeyOmitEmpty(key, vt) + case int: + enc.AddIntKeyOmitEmpty(key, vt) + case int64: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case int32: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case int16: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case int8: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case uint64: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case uint32: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case uint16: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case uint8: + enc.AddIntKeyOmitEmpty(key, int(vt)) + case float64: + enc.AddFloatKeyOmitEmpty(key, vt) + case float32: + enc.AddFloat32KeyOmitEmpty(key, vt) + default: + if vt != nil { + enc.err = InvalidMarshalError(fmt.Sprintf(invalidMarshalErrorMsg, vt)) + return + } + return + } +} diff --git a/vendor/github.com/francoispqt/gojay/encode_null.go b/vendor/github.com/francoispqt/gojay/encode_null.go new file mode 100644 index 0000000..cec4e63 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_null.go @@ -0,0 +1,39 @@ +package gojay + +// AddNull adds a `null` to be encoded. Must be used while encoding an array.` +func (enc *Encoder) AddNull() { + enc.Null() +} + +// Null adds a `null` to be encoded. Must be used while encoding an array.` +func (enc *Encoder) Null() { + enc.grow(5) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeBytes(nullBytes) +} + +// AddNullKey adds a `null` to be encoded. Must be used while encoding an array.` +func (enc *Encoder) AddNullKey(key string) { + enc.NullKey(key) +} + +// NullKey adds a `null` to be encoded. 
Must be used while encoding an array.` +func (enc *Encoder) NullKey(key string) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.writeBytes(nullBytes) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_number.go b/vendor/github.com/francoispqt/gojay/encode_number.go new file mode 100644 index 0000000..53affb9 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_number.go @@ -0,0 +1 @@ +package gojay diff --git a/vendor/github.com/francoispqt/gojay/encode_number_float.go b/vendor/github.com/francoispqt/gojay/encode_number_float.go new file mode 100644 index 0000000..b45f844 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_number_float.go @@ -0,0 +1,368 @@ +package gojay + +import "strconv" + +// EncodeFloat encodes a float64 to JSON +func (enc *Encoder) EncodeFloat(n float64) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeFloat(n) + _, err := enc.Write() + if err != nil { + return err + } + return nil +} + +// encodeFloat encodes a float64 to JSON +func (enc *Encoder) encodeFloat(n float64) ([]byte, error) { + enc.buf = strconv.AppendFloat(enc.buf, n, 'f', -1, 64) + return enc.buf, nil +} + +// EncodeFloat32 encodes a float32 to JSON +func (enc *Encoder) EncodeFloat32(n float32) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeFloat32(n) + _, err := enc.Write() + if err != nil { + return err + } + return nil +} + +func (enc *Encoder) encodeFloat32(n float32) ([]byte, error) { + enc.buf = strconv.AppendFloat(enc.buf, float64(n), 'f', -1, 32) + return enc.buf, nil +} + +// AddFloat adds a float64 to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddFloat(v float64) { + enc.Float64(v) +} + +// AddFloatOmitEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddFloatOmitEmpty(v float64) { + enc.Float64OmitEmpty(v) +} + +// AddFloatNullEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddFloatNullEmpty(v float64) { + enc.Float64NullEmpty(v) +} + +// Float adds a float64 to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Float(v float64) { + enc.Float64(v) +} + +// FloatOmitEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) FloatOmitEmpty(v float64) { + enc.Float64OmitEmpty(v) +} + +// FloatNullEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) FloatNullEmpty(v float64) { + enc.Float64NullEmpty(v) +} + +// AddFloatKey adds a float64 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddFloatKey(key string, v float64) { + enc.Float64Key(key, v) +} + +// AddFloatKeyOmitEmpty adds a float64 to be encoded and skips it if its value is 0. 
+// Must be used inside an object as it will encode a key +func (enc *Encoder) AddFloatKeyOmitEmpty(key string, v float64) { + enc.Float64KeyOmitEmpty(key, v) +} + +// AddFloatKeyNullEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddFloatKeyNullEmpty(key string, v float64) { + enc.Float64KeyNullEmpty(key, v) +} + +// FloatKey adds a float64 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) FloatKey(key string, v float64) { + enc.Float64Key(key, v) +} + +// FloatKeyOmitEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) FloatKeyOmitEmpty(key string, v float64) { + enc.Float64KeyOmitEmpty(key, v) +} + +// FloatKeyNullEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) FloatKeyNullEmpty(key string, v float64) { + enc.Float64KeyNullEmpty(key, v) +} + +// AddFloat64 adds a float64 to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddFloat64(v float64) { + enc.Float(v) +} + +// AddFloat64OmitEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddFloat64OmitEmpty(v float64) { + enc.FloatOmitEmpty(v) +} + +// Float64 adds a float64 to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Float64(v float64) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendFloat(enc.buf, v, 'f', -1, 64) +} + +// Float64OmitEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Float64OmitEmpty(v float64) { + if v == 0 { + return + } + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendFloat(enc.buf, v, 'f', -1, 64) +} + +// Float64NullEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Float64NullEmpty(v float64) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendFloat(enc.buf, v, 'f', -1, 64) +} + +// AddFloat64Key adds a float64 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddFloat64Key(key string, v float64) { + enc.FloatKey(key, v) +} + +// AddFloat64KeyOmitEmpty adds a float64 to be encoded and skips it if its value is 0. 
+// Must be used inside an object as it will encode a key +func (enc *Encoder) AddFloat64KeyOmitEmpty(key string, v float64) { + enc.FloatKeyOmitEmpty(key, v) +} + +// Float64Key adds a float64 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Float64Key(key string, value float64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.grow(10) + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendFloat(enc.buf, value, 'f', -1, 64) +} + +// Float64KeyOmitEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) Float64KeyOmitEmpty(key string, v float64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == 0 { + return + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendFloat(enc.buf, v, 'f', -1, 64) +} + +// Float64KeyNullEmpty adds a float64 to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Float64KeyNullEmpty(key string, v float64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendFloat(enc.buf, v, 'f', -1, 64) +} + +// AddFloat32 adds a float32 to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddFloat32(v float32) { + enc.Float32(v) +} + +// AddFloat32OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddFloat32OmitEmpty(v float32) { + enc.Float32OmitEmpty(v) +} + +// AddFloat32NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddFloat32NullEmpty(v float32) { + enc.Float32NullEmpty(v) +} + +// Float32 adds a float32 to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Float32(v float32) { + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendFloat(enc.buf, float64(v), 'f', -1, 32) +} + +// Float32OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Float32OmitEmpty(v float32) { + if v == 0 { + return + } + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendFloat(enc.buf, float64(v), 'f', -1, 32) +} + +// Float32NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). 
+func (enc *Encoder) Float32NullEmpty(v float32) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendFloat(enc.buf, float64(v), 'f', -1, 32) +} + +// AddFloat32Key adds a float32 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddFloat32Key(key string, v float32) { + enc.Float32Key(key, v) +} + +// AddFloat32KeyOmitEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddFloat32KeyOmitEmpty(key string, v float32) { + enc.Float32KeyOmitEmpty(key, v) +} + +// AddFloat32KeyNullEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddFloat32KeyNullEmpty(key string, v float32) { + enc.Float32KeyNullEmpty(key, v) +} + +// Float32Key adds a float32 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Float32Key(key string, v float32) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeByte('"') + enc.writeByte(':') + enc.buf = strconv.AppendFloat(enc.buf, float64(v), 'f', -1, 32) +} + +// Float32KeyOmitEmpty adds a float64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key +func (enc *Encoder) Float32KeyOmitEmpty(key string, v float32) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == 0 { + return + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendFloat(enc.buf, float64(v), 'f', -1, 32) +} + +// Float32KeyNullEmpty adds a float64 to be encoded and skips it if its value is 0. 
+// Must be used inside an object as it will encode a key +func (enc *Encoder) Float32KeyNullEmpty(key string, v float32) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendFloat(enc.buf, float64(v), 'f', -1, 32) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_number_int.go b/vendor/github.com/francoispqt/gojay/encode_number_int.go new file mode 100644 index 0000000..2c4bbe3 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_number_int.go @@ -0,0 +1,500 @@ +package gojay + +import "strconv" + +// EncodeInt encodes an int to JSON +func (enc *Encoder) EncodeInt(n int) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeInt(n) + _, err := enc.Write() + if err != nil { + return err + } + return nil +} + +// encodeInt encodes an int to JSON +func (enc *Encoder) encodeInt(n int) ([]byte, error) { + enc.buf = strconv.AppendInt(enc.buf, int64(n), 10) + return enc.buf, nil +} + +// EncodeInt64 encodes an int64 to JSON +func (enc *Encoder) EncodeInt64(n int64) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeInt64(n) + _, err := enc.Write() + if err != nil { + return err + } + return nil +} + +// encodeInt64 encodes an int to JSON +func (enc *Encoder) encodeInt64(n int64) ([]byte, error) { + enc.buf = strconv.AppendInt(enc.buf, n, 10) + return enc.buf, nil +} + +// AddInt adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddInt(v int) { + enc.Int(v) +} + +// AddIntOmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddIntOmitEmpty(v int) { + enc.IntOmitEmpty(v) +} + +// AddIntNullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddIntNullEmpty(v int) { + enc.IntNullEmpty(v) +} + +// Int adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Int(v int) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendInt(enc.buf, int64(v), 10) +} + +// IntOmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) IntOmitEmpty(v int) { + if v == 0 { + return + } + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendInt(enc.buf, int64(v), 10) +} + +// IntNullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). 
+func (enc *Encoder) IntNullEmpty(v int) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendInt(enc.buf, int64(v), 10) +} + +// AddIntKey adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddIntKey(key string, v int) { + enc.IntKey(key, v) +} + +// AddIntKeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddIntKeyOmitEmpty(key string, v int) { + enc.IntKeyOmitEmpty(key, v) +} + +// AddIntKeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddIntKeyNullEmpty(key string, v int) { + enc.IntKeyNullEmpty(key, v) +} + +// IntKey adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) IntKey(key string, v int) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendInt(enc.buf, int64(v), 10) +} + +// IntKeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) IntKeyOmitEmpty(key string, v int) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == 0 { + return + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' && r != '[' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendInt(enc.buf, int64(v), 10) +} + +// IntKeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) IntKeyNullEmpty(key string, v int) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' && r != '[' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendInt(enc.buf, int64(v), 10) +} + +// AddInt64 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddInt64(v int64) { + enc.Int64(v) +} + +// AddInt64OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddInt64OmitEmpty(v int64) { + enc.Int64OmitEmpty(v) +} + +// AddInt64NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddInt64NullEmpty(v int64) { + enc.Int64NullEmpty(v) +} + +// Int64 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Int64(v int64) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendInt(enc.buf, v, 10) +} + +// Int64OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). 
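The OmitEmpty and NullEmpty key variants differ only in how a zero value is rendered: the former drops the key, the latter writes null. An illustrative sketch (the stats type is assumed):

// Sketch only; assumes: import "github.com/francoispqt/gojay"
type stats struct {
	Score float64
	Count int
}

func (s *stats) MarshalJSONObject(enc *gojay.Encoder) {
	enc.Float64KeyOmitEmpty("score", s.Score) // the "score" key is dropped entirely when Score == 0
	enc.IntKeyNullEmpty("count", s.Count)     // written as "count":null when Count == 0
}

func (s *stats) IsNil() bool { return s == nil }

// gojay.MarshalJSONObject(&stats{}) -> {"count":null}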
+func (enc *Encoder) Int64OmitEmpty(v int64) { + if v == 0 { + return + } + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendInt(enc.buf, v, 10) +} + +// Int64NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int64NullEmpty(v int64) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendInt(enc.buf, v, 10) +} + +// AddInt64Key adds an int64 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddInt64Key(key string, v int64) { + enc.Int64Key(key, v) +} + +// AddInt64KeyOmitEmpty adds an int64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddInt64KeyOmitEmpty(key string, v int64) { + enc.Int64KeyOmitEmpty(key, v) +} + +// AddInt64KeyNullEmpty adds an int64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddInt64KeyNullEmpty(key string, v int64) { + enc.Int64KeyNullEmpty(key, v) +} + +// Int64Key adds an int64 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Int64Key(key string, v int64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendInt(enc.buf, v, 10) +} + +// Int64KeyOmitEmpty adds an int64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int64KeyOmitEmpty(key string, v int64) { + if v == 0 { + return + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendInt(enc.buf, v, 10) +} + +// Int64KeyNullEmpty adds an int64 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int64KeyNullEmpty(key string, v int64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendInt(enc.buf, v, 10) +} + +// AddInt32 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddInt32(v int32) { + enc.Int64(int64(v)) +} + +// AddInt32OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddInt32OmitEmpty(v int32) { + enc.Int64OmitEmpty(int64(v)) +} + +// AddInt32NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). 
+func (enc *Encoder) AddInt32NullEmpty(v int32) { + enc.Int64NullEmpty(int64(v)) +} + +// Int32 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Int32(v int32) { + enc.Int64(int64(v)) +} + +// Int32OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int32OmitEmpty(v int32) { + enc.Int64OmitEmpty(int64(v)) +} + +// Int32NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int32NullEmpty(v int32) { + enc.Int64NullEmpty(int64(v)) +} + +// AddInt32Key adds an int32 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddInt32Key(key string, v int32) { + enc.Int64Key(key, int64(v)) +} + +// AddInt32KeyOmitEmpty adds an int32 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddInt32KeyOmitEmpty(key string, v int32) { + enc.Int64KeyOmitEmpty(key, int64(v)) +} + +// Int32Key adds an int32 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Int32Key(key string, v int32) { + enc.Int64Key(key, int64(v)) +} + +// Int32KeyOmitEmpty adds an int32 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int32KeyOmitEmpty(key string, v int32) { + enc.Int64KeyOmitEmpty(key, int64(v)) +} + +// Int32KeyNullEmpty adds an int32 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int32KeyNullEmpty(key string, v int32) { + enc.Int64KeyNullEmpty(key, int64(v)) +} + +// AddInt16 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddInt16(v int16) { + enc.Int64(int64(v)) +} + +// AddInt16OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddInt16OmitEmpty(v int16) { + enc.Int64OmitEmpty(int64(v)) +} + +// Int16 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Int16(v int16) { + enc.Int64(int64(v)) +} + +// Int16OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int16OmitEmpty(v int16) { + enc.Int64OmitEmpty(int64(v)) +} + +// Int16NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int16NullEmpty(v int16) { + enc.Int64NullEmpty(int64(v)) +} + +// AddInt16Key adds an int16 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddInt16Key(key string, v int16) { + enc.Int64Key(key, int64(v)) +} + +// AddInt16KeyOmitEmpty adds an int16 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddInt16KeyOmitEmpty(key string, v int16) { + enc.Int64KeyOmitEmpty(key, int64(v)) +} + +// AddInt16KeyNullEmpty adds an int16 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. 
+func (enc *Encoder) AddInt16KeyNullEmpty(key string, v int16) { + enc.Int64KeyNullEmpty(key, int64(v)) +} + +// Int16Key adds an int16 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Int16Key(key string, v int16) { + enc.Int64Key(key, int64(v)) +} + +// Int16KeyOmitEmpty adds an int16 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int16KeyOmitEmpty(key string, v int16) { + enc.Int64KeyOmitEmpty(key, int64(v)) +} + +// Int16KeyNullEmpty adds an int16 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int16KeyNullEmpty(key string, v int16) { + enc.Int64KeyNullEmpty(key, int64(v)) +} + +// AddInt8 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddInt8(v int8) { + enc.Int64(int64(v)) +} + +// AddInt8OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddInt8OmitEmpty(v int8) { + enc.Int64OmitEmpty(int64(v)) +} + +// AddInt8NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddInt8NullEmpty(v int8) { + enc.Int64NullEmpty(int64(v)) +} + +// Int8 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Int8(v int8) { + enc.Int64(int64(v)) +} + +// Int8OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int8OmitEmpty(v int8) { + enc.Int64OmitEmpty(int64(v)) +} + +// Int8NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Int8NullEmpty(v int8) { + enc.Int64NullEmpty(int64(v)) +} + +// AddInt8Key adds an int8 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddInt8Key(key string, v int8) { + enc.Int64Key(key, int64(v)) +} + +// AddInt8KeyOmitEmpty adds an int8 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddInt8KeyOmitEmpty(key string, v int8) { + enc.Int64KeyOmitEmpty(key, int64(v)) +} + +// AddInt8KeyNullEmpty adds an int8 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddInt8KeyNullEmpty(key string, v int8) { + enc.Int64KeyNullEmpty(key, int64(v)) +} + +// Int8Key adds an int8 to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Int8Key(key string, v int8) { + enc.Int64Key(key, int64(v)) +} + +// Int8KeyOmitEmpty adds an int8 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Int8KeyOmitEmpty(key string, v int8) { + enc.Int64KeyOmitEmpty(key, int64(v)) +} + +// Int8KeyNullEmpty adds an int8 to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. 
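As the wrappers above show, the int8, int16 and int32 helpers simply widen their argument and delegate to the Int64 variants, so the emitted JSON is identical regardless of the Go integer width. A hypothetical illustration, meant to be called from inside a MarshalJSONObject method:

    // writeCounts shows that the narrow and wide helpers emit the same kind of
    // JSON number: Int8Key widens to int64 and forwards to Int64Key.
    func writeCounts(enc *gojay.Encoder, small int8, wide int64) {
        enc.Int8Key("retries", small)
        enc.Int64Key("retries64", wide)
    }
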
+func (enc *Encoder) Int8KeyNullEmpty(key string, v int8) { + enc.Int64KeyNullEmpty(key, int64(v)) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_number_uint.go b/vendor/github.com/francoispqt/gojay/encode_number_uint.go new file mode 100644 index 0000000..cd69b13 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_number_uint.go @@ -0,0 +1,362 @@ +package gojay + +import "strconv" + +// EncodeUint64 encodes an int64 to JSON +func (enc *Encoder) EncodeUint64(n uint64) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeUint64(n) + _, err := enc.Write() + if err != nil { + return err + } + return nil +} + +// encodeUint64 encodes an int to JSON +func (enc *Encoder) encodeUint64(n uint64) ([]byte, error) { + enc.buf = strconv.AppendUint(enc.buf, n, 10) + return enc.buf, nil +} + +// AddUint64 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddUint64(v uint64) { + enc.Uint64(v) +} + +// AddUint64OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint64OmitEmpty(v uint64) { + enc.Uint64OmitEmpty(v) +} + +// AddUint64NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint64NullEmpty(v uint64) { + enc.Uint64NullEmpty(v) +} + +// Uint64 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Uint64(v uint64) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendUint(enc.buf, v, 10) +} + +// Uint64OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint64OmitEmpty(v uint64) { + if v == 0 { + return + } + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.buf = strconv.AppendUint(enc.buf, v, 10) +} + +// Uint64NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint64NullEmpty(v uint64) { + enc.grow(10) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendUint(enc.buf, v, 10) +} + +// AddUint64Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddUint64Key(key string, v uint64) { + enc.Uint64Key(key, v) +} + +// AddUint64KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddUint64KeyOmitEmpty(key string, v uint64) { + enc.Uint64KeyOmitEmpty(key, v) +} + +// AddUint64KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. 
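The unsigned helpers that follow mirror the signed ones, using strconv.AppendUint instead of AppendInt, so the full uint64 range can be emitted. A minimal sketch of the top-level entry point (hypothetical caller code):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/francoispqt/gojay"
    )

    func main() {
        var buf bytes.Buffer
        enc := gojay.NewEncoder(&buf)

        var n uint64 = 1<<64 - 1 // largest uint64, beyond the int64 range
        if err := enc.EncodeUint64(n); err != nil {
            panic(err)
        }
        fmt.Println(buf.String()) // 18446744073709551615
    }
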
+func (enc *Encoder) AddUint64KeyNullEmpty(key string, v uint64) { + enc.Uint64KeyNullEmpty(key, v) +} + +// Uint64Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Uint64Key(key string, v uint64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendUint(enc.buf, v, 10) +} + +// Uint64KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Uint64KeyOmitEmpty(key string, v uint64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == 0 { + return + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' && r != '[' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + enc.buf = strconv.AppendUint(enc.buf, v, 10) +} + +// Uint64KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Uint64KeyNullEmpty(key string, v uint64) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(10 + len(key)) + r := enc.getPreviousRune() + if r != '{' && r != '[' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v == 0 { + enc.writeBytes(nullBytes) + return + } + enc.buf = strconv.AppendUint(enc.buf, v, 10) +} + +// AddUint32 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddUint32(v uint32) { + enc.Uint64(uint64(v)) +} + +// AddUint32OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint32OmitEmpty(v uint32) { + enc.Uint64OmitEmpty(uint64(v)) +} + +// AddUint32NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint32NullEmpty(v uint32) { + enc.Uint64NullEmpty(uint64(v)) +} + +// Uint32 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Uint32(v uint32) { + enc.Uint64(uint64(v)) +} + +// Uint32OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint32OmitEmpty(v uint32) { + enc.Uint64OmitEmpty(uint64(v)) +} + +// Uint32NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint32NullEmpty(v uint32) { + enc.Uint64NullEmpty(uint64(v)) +} + +// AddUint32Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddUint32Key(key string, v uint32) { + enc.Uint64Key(key, uint64(v)) +} + +// AddUint32KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddUint32KeyOmitEmpty(key string, v uint32) { + enc.Uint64KeyOmitEmpty(key, uint64(v)) +} + +// AddUint32KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. 
+func (enc *Encoder) AddUint32KeyNullEmpty(key string, v uint32) { + enc.Uint64KeyNullEmpty(key, uint64(v)) +} + +// Uint32Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Uint32Key(key string, v uint32) { + enc.Uint64Key(key, uint64(v)) +} + +// Uint32KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Uint32KeyOmitEmpty(key string, v uint32) { + enc.Uint64KeyOmitEmpty(key, uint64(v)) +} + +// Uint32KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Uint32KeyNullEmpty(key string, v uint32) { + enc.Uint64KeyNullEmpty(key, uint64(v)) +} + +// AddUint16 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddUint16(v uint16) { + enc.Uint64(uint64(v)) +} + +// AddUint16OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint16OmitEmpty(v uint16) { + enc.Uint64OmitEmpty(uint64(v)) +} + +// AddUint16NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint16NullEmpty(v uint16) { + enc.Uint64NullEmpty(uint64(v)) +} + +// Uint16 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Uint16(v uint16) { + enc.Uint64(uint64(v)) +} + +// Uint16OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint16OmitEmpty(v uint16) { + enc.Uint64OmitEmpty(uint64(v)) +} + +// Uint16NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint16NullEmpty(v uint16) { + enc.Uint64NullEmpty(uint64(v)) +} + +// AddUint16Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddUint16Key(key string, v uint16) { + enc.Uint64Key(key, uint64(v)) +} + +// AddUint16KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddUint16KeyOmitEmpty(key string, v uint16) { + enc.Uint64KeyOmitEmpty(key, uint64(v)) +} + +// AddUint16KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddUint16KeyNullEmpty(key string, v uint16) { + enc.Uint64KeyNullEmpty(key, uint64(v)) +} + +// Uint16Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Uint16Key(key string, v uint16) { + enc.Uint64Key(key, uint64(v)) +} + +// Uint16KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Uint16KeyOmitEmpty(key string, v uint16) { + enc.Uint64KeyOmitEmpty(key, uint64(v)) +} + +// Uint16KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. 
+func (enc *Encoder) Uint16KeyNullEmpty(key string, v uint16) { + enc.Uint64KeyNullEmpty(key, uint64(v)) +} + +// AddUint8 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddUint8(v uint8) { + enc.Uint64(uint64(v)) +} + +// AddUint8OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint8OmitEmpty(v uint8) { + enc.Uint64OmitEmpty(uint64(v)) +} + +// AddUint8NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) AddUint8NullEmpty(v uint8) { + enc.Uint64NullEmpty(uint64(v)) +} + +// Uint8 adds an int to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) Uint8(v uint8) { + enc.Uint64(uint64(v)) +} + +// Uint8OmitEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint8OmitEmpty(v uint8) { + enc.Uint64OmitEmpty(uint64(v)) +} + +// Uint8NullEmpty adds an int to be encoded and skips it if its value is 0, +// must be used inside a slice or array encoding (does not encode a key). +func (enc *Encoder) Uint8NullEmpty(v uint8) { + enc.Uint64NullEmpty(uint64(v)) +} + +// AddUint8Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddUint8Key(key string, v uint8) { + enc.Uint64Key(key, uint64(v)) +} + +// AddUint8KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddUint8KeyOmitEmpty(key string, v uint8) { + enc.Uint64KeyOmitEmpty(key, uint64(v)) +} + +// AddUint8KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) AddUint8KeyNullEmpty(key string, v uint8) { + enc.Uint64KeyNullEmpty(key, uint64(v)) +} + +// Uint8Key adds an int to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) Uint8Key(key string, v uint8) { + enc.Uint64Key(key, uint64(v)) +} + +// Uint8KeyOmitEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. +func (enc *Encoder) Uint8KeyOmitEmpty(key string, v uint8) { + enc.Uint64KeyOmitEmpty(key, uint64(v)) +} + +// Uint8KeyNullEmpty adds an int to be encoded and skips it if its value is 0. +// Must be used inside an object as it will encode a key. 
+func (enc *Encoder) Uint8KeyNullEmpty(key string, v uint8) { + enc.Uint64KeyNullEmpty(key, uint64(v)) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_object.go b/vendor/github.com/francoispqt/gojay/encode_object.go new file mode 100644 index 0000000..5f2c8cf --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_object.go @@ -0,0 +1,400 @@ +package gojay + +var objKeyStr = []byte(`":"`) +var objKeyObj = []byte(`":{`) +var objKeyArr = []byte(`":[`) +var objKey = []byte(`":`) + +// EncodeObject encodes an object to JSON +func (enc *Encoder) EncodeObject(v MarshalerJSONObject) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, err := enc.encodeObject(v) + if err != nil { + enc.err = err + return err + } + _, err = enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +// EncodeObjectKeys encodes an object to JSON +func (enc *Encoder) EncodeObjectKeys(v MarshalerJSONObject, keys []string) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + enc.hasKeys = true + enc.keys = keys + _, err := enc.encodeObject(v) + if err != nil { + enc.err = err + return err + } + _, err = enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +func (enc *Encoder) encodeObject(v MarshalerJSONObject) ([]byte, error) { + enc.grow(512) + enc.writeByte('{') + if !v.IsNil() { + v.MarshalJSONObject(enc) + } + if enc.hasKeys { + enc.hasKeys = false + enc.keys = nil + } + enc.writeByte('}') + return enc.buf, enc.err +} + +// AddObject adds an object to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) AddObject(v MarshalerJSONObject) { + enc.Object(v) +} + +// AddObjectOmitEmpty adds an object to be encoded or skips it if IsNil returns true. +// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) AddObjectOmitEmpty(v MarshalerJSONObject) { + enc.ObjectOmitEmpty(v) +} + +// AddObjectNullEmpty adds an object to be encoded or skips it if IsNil returns true. +// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) AddObjectNullEmpty(v MarshalerJSONObject) { + enc.ObjectNullEmpty(v) +} + +// AddObjectKey adds a struct to be encoded, must be used inside an object as it will encode a key +// value must implement MarshalerJSONObject +func (enc *Encoder) AddObjectKey(key string, v MarshalerJSONObject) { + enc.ObjectKey(key, v) +} + +// AddObjectKeyOmitEmpty adds an object to be encoded or skips it if IsNil returns true. +// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) AddObjectKeyOmitEmpty(key string, v MarshalerJSONObject) { + enc.ObjectKeyOmitEmpty(key, v) +} + +// AddObjectKeyNullEmpty adds an object to be encoded or skips it if IsNil returns true. 
+// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) AddObjectKeyNullEmpty(key string, v MarshalerJSONObject) { + enc.ObjectKeyNullEmpty(key, v) +} + +// Object adds an object to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) Object(v MarshalerJSONObject) { + if v.IsNil() { + enc.grow(2) + r := enc.getPreviousRune() + if r != '{' && r != '[' { + enc.writeByte(',') + } + enc.writeByte('{') + enc.writeByte('}') + return + } + enc.grow(4) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeByte('{') + + var origHasKeys = enc.hasKeys + var origKeys = enc.keys + enc.hasKeys = false + enc.keys = nil + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// ObjectWithKeys adds an object to be encoded, must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject. It will only encode the keys in keys. +func (enc *Encoder) ObjectWithKeys(v MarshalerJSONObject, keys []string) { + if v.IsNil() { + enc.grow(2) + r := enc.getPreviousRune() + if r != '{' && r != '[' { + enc.writeByte(',') + } + enc.writeByte('{') + enc.writeByte('}') + return + } + enc.grow(4) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeByte('{') + + var origKeys = enc.keys + var origHasKeys = enc.hasKeys + enc.hasKeys = true + enc.keys = keys + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// ObjectOmitEmpty adds an object to be encoded or skips it if IsNil returns true. +// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) ObjectOmitEmpty(v MarshalerJSONObject) { + if v.IsNil() { + return + } + enc.grow(2) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + enc.writeByte('{') + + var origHasKeys = enc.hasKeys + var origKeys = enc.keys + enc.hasKeys = false + enc.keys = nil + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// ObjectNullEmpty adds an object to be encoded or skips it if IsNil returns true. 
+// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) ObjectNullEmpty(v MarshalerJSONObject) { + enc.grow(2) + r := enc.getPreviousRune() + if r != '[' { + enc.writeByte(',') + } + if v.IsNil() { + enc.writeBytes(nullBytes) + return + } + enc.writeByte('{') + + var origHasKeys = enc.hasKeys + var origKeys = enc.keys + enc.hasKeys = false + enc.keys = nil + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// ObjectKey adds a struct to be encoded, must be used inside an object as it will encode a key +// value must implement MarshalerJSONObject +func (enc *Encoder) ObjectKey(key string, v MarshalerJSONObject) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v.IsNil() { + enc.grow(2 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyObj) + enc.writeByte('}') + return + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyObj) + + var origHasKeys = enc.hasKeys + var origKeys = enc.keys + enc.hasKeys = false + enc.keys = nil + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// ObjectKeyWithKeys adds a struct to be encoded, must be used inside an object as it will encode a key. +// Value must implement MarshalerJSONObject. It will only encode the keys in keys. +func (enc *Encoder) ObjectKeyWithKeys(key string, value MarshalerJSONObject, keys []string) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if value.IsNil() { + enc.grow(2 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyObj) + enc.writeByte('}') + return + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyObj) + var origKeys = enc.keys + var origHasKeys = enc.hasKeys + enc.hasKeys = true + enc.keys = keys + value.MarshalJSONObject(enc) + enc.hasKeys = origHasKeys + enc.keys = origKeys + enc.writeByte('}') +} + +// ObjectKeyOmitEmpty adds an object to be encoded or skips it if IsNil returns true. +// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) ObjectKeyOmitEmpty(key string, v MarshalerJSONObject) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v.IsNil() { + return + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKeyObj) + + var origHasKeys = enc.hasKeys + var origKeys = enc.keys + enc.hasKeys = false + enc.keys = nil + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// ObjectKeyNullEmpty adds an object to be encoded or skips it if IsNil returns true. 
+// Must be used inside a slice or array encoding (does not encode a key) +// value must implement MarshalerJSONObject +func (enc *Encoder) ObjectKeyNullEmpty(key string, v MarshalerJSONObject) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(5 + len(key)) + r := enc.getPreviousRune() + if r != '{' { + enc.writeByte(',') + } + enc.writeByte('"') + enc.writeStringEscape(key) + enc.writeBytes(objKey) + if v.IsNil() { + enc.writeBytes(nullBytes) + return + } + enc.writeByte('{') + + var origHasKeys = enc.hasKeys + var origKeys = enc.keys + enc.hasKeys = false + enc.keys = nil + + v.MarshalJSONObject(enc) + + enc.hasKeys = origHasKeys + enc.keys = origKeys + + enc.writeByte('}') +} + +// EncodeObjectFunc is a custom func type implementing MarshaleObject. +// Use it to cast a func(*Encoder) to Marshal an object. +// +// enc := gojay.NewEncoder(io.Writer) +// enc.EncodeObject(gojay.EncodeObjectFunc(func(enc *gojay.Encoder) { +// enc.AddStringKey("hello", "world") +// })) +type EncodeObjectFunc func(*Encoder) + +// MarshalJSONObject implements MarshalerJSONObject. +func (f EncodeObjectFunc) MarshalJSONObject(enc *Encoder) { + f(enc) +} + +// IsNil implements MarshalerJSONObject. +func (f EncodeObjectFunc) IsNil() bool { + return f == nil +} + +func (enc *Encoder) keyExists(k string) bool { + if enc.keys == nil { + return false + } + for _, key := range enc.keys { + if key == k { + return true + } + } + return false +} diff --git a/vendor/github.com/francoispqt/gojay/encode_pool.go b/vendor/github.com/francoispqt/gojay/encode_pool.go new file mode 100644 index 0000000..3b26322 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_pool.go @@ -0,0 +1,50 @@ +package gojay + +import ( + "io" + "sync" +) + +var encPool = sync.Pool{ + New: func() interface{} { + return NewEncoder(nil) + }, +} + +var streamEncPool = sync.Pool{ + New: func() interface{} { + return Stream.NewEncoder(nil) + }, +} + +func init() { + for i := 0; i < 32; i++ { + encPool.Put(NewEncoder(nil)) + } + for i := 0; i < 32; i++ { + streamEncPool.Put(Stream.NewEncoder(nil)) + } +} + +// NewEncoder returns a new encoder or borrows one from the pool +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// BorrowEncoder borrows an Encoder from the pool. +func BorrowEncoder(w io.Writer) *Encoder { + enc := encPool.Get().(*Encoder) + enc.w = w + enc.buf = enc.buf[:0] + enc.isPooled = 0 + enc.err = nil + enc.hasKeys = false + enc.keys = nil + return enc +} + +// Release sends back a Encoder to the pool. 
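BorrowEncoder and Release above implement the pooling pattern: take an encoder from the sync.Pool, use it, and hand it back; using an encoder after Release panics with InvalidUsagePooledEncoderError. A usage sketch combining the pool with EncodeObjectFunc from encode_object.go (hypothetical caller code, requires the io and gojay imports):

    // encodeGreeting borrows a pooled encoder, writes {"hello":"world"} to w,
    // and returns the encoder to the pool when done.
    func encodeGreeting(w io.Writer) error {
        enc := gojay.BorrowEncoder(w)
        defer enc.Release()
        return enc.EncodeObject(gojay.EncodeObjectFunc(func(enc *gojay.Encoder) {
            enc.AddStringKey("hello", "world")
        }))
    }
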
+func (enc *Encoder) Release() { + enc.isPooled = 1 + encPool.Put(enc) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_slice.go b/vendor/github.com/francoispqt/gojay/encode_slice.go new file mode 100644 index 0000000..7d964df --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_slice.go @@ -0,0 +1,113 @@ +package gojay + +// AddSliceString marshals the given []string s +func (enc *Encoder) AddSliceString(s []string) { + enc.SliceString(s) +} + +// SliceString marshals the given []string s +func (enc *Encoder) SliceString(s []string) { + enc.Array(EncodeArrayFunc(func(enc *Encoder) { + for _, str := range s { + enc.String(str) + } + })) +} + +// AddSliceStringKey marshals the given []string s +func (enc *Encoder) AddSliceStringKey(k string, s []string) { + enc.SliceStringKey(k, s) +} + +// SliceStringKey marshals the given []string s +func (enc *Encoder) SliceStringKey(k string, s []string) { + enc.ArrayKey(k, EncodeArrayFunc(func(enc *Encoder) { + for _, str := range s { + enc.String(str) + } + })) +} + +// AddSliceInt marshals the given []int s +func (enc *Encoder) AddSliceInt(s []int) { + enc.SliceInt(s) +} + +// SliceInt marshals the given []int s +func (enc *Encoder) SliceInt(s []int) { + enc.Array(EncodeArrayFunc(func(enc *Encoder) { + for _, i := range s { + enc.Int(i) + } + })) +} + +// AddSliceIntKey marshals the given []int s +func (enc *Encoder) AddSliceIntKey(k string, s []int) { + enc.SliceIntKey(k, s) +} + +// SliceIntKey marshals the given []int s +func (enc *Encoder) SliceIntKey(k string, s []int) { + enc.ArrayKey(k, EncodeArrayFunc(func(enc *Encoder) { + for _, i := range s { + enc.Int(i) + } + })) +} + +// AddSliceFloat64 marshals the given []float64 s +func (enc *Encoder) AddSliceFloat64(s []float64) { + enc.SliceFloat64(s) +} + +// SliceFloat64 marshals the given []float64 s +func (enc *Encoder) SliceFloat64(s []float64) { + enc.Array(EncodeArrayFunc(func(enc *Encoder) { + for _, i := range s { + enc.Float64(i) + } + })) +} + +// AddSliceFloat64Key marshals the given []float64 s +func (enc *Encoder) AddSliceFloat64Key(k string, s []float64) { + enc.SliceFloat64Key(k, s) +} + +// SliceFloat64Key marshals the given []float64 s +func (enc *Encoder) SliceFloat64Key(k string, s []float64) { + enc.ArrayKey(k, EncodeArrayFunc(func(enc *Encoder) { + for _, i := range s { + enc.Float64(i) + } + })) +} + +// AddSliceBool marshals the given []bool s +func (enc *Encoder) AddSliceBool(s []bool) { + enc.SliceBool(s) +} + +// SliceBool marshals the given []bool s +func (enc *Encoder) SliceBool(s []bool) { + enc.Array(EncodeArrayFunc(func(enc *Encoder) { + for _, i := range s { + enc.Bool(i) + } + })) +} + +// AddSliceBoolKey marshals the given []bool s +func (enc *Encoder) AddSliceBoolKey(k string, s []bool) { + enc.SliceBoolKey(k, s) +} + +// SliceBoolKey marshals the given []bool s +func (enc *Encoder) SliceBoolKey(k string, s []bool) { + enc.ArrayKey(k, EncodeArrayFunc(func(enc *Encoder) { + for _, i := range s { + enc.Bool(i) + } + })) +} diff --git a/vendor/github.com/francoispqt/gojay/encode_sqlnull.go b/vendor/github.com/francoispqt/gojay/encode_sqlnull.go new file mode 100644 index 0000000..04ff596 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_sqlnull.go @@ -0,0 +1,377 @@ +package gojay + +import "database/sql" + +// EncodeSQLNullString encodes a string to +func (enc *Encoder) EncodeSQLNullString(v *sql.NullString) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = 
enc.encodeString(v.String) + _, err := enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +// AddSQLNullString adds a string to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullString(v *sql.NullString) { + enc.String(v.String) +} + +// AddSQLNullStringOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullStringOmitEmpty(v *sql.NullString) { + if v != nil && v.Valid && v.String != "" { + enc.StringOmitEmpty(v.String) + } +} + +// AddSQLNullStringNullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullStringNullEmpty(v *sql.NullString) { + if v != nil && v.Valid { + enc.StringNullEmpty(v.String) + } +} + +// AddSQLNullStringKey adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullStringKey(key string, v *sql.NullString) { + enc.StringKey(key, v.String) +} + +// AddSQLNullStringKeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullStringKeyOmitEmpty(key string, v *sql.NullString) { + if v != nil && v.Valid && v.String != "" { + enc.StringKeyOmitEmpty(key, v.String) + } +} + +// SQLNullString adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullString(v *sql.NullString) { + enc.String(v.String) +} + +// SQLNullStringOmitEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullStringOmitEmpty(v *sql.NullString) { + if v != nil && v.Valid && v.String != "" { + enc.String(v.String) + } +} + +// SQLNullStringNullEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullStringNullEmpty(v *sql.NullString) { + if v != nil && v.Valid { + enc.StringNullEmpty(v.String) + } +} + +// SQLNullStringKey adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullStringKey(key string, v *sql.NullString) { + enc.StringKey(key, v.String) +} + +// SQLNullStringKeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullStringKeyOmitEmpty(key string, v *sql.NullString) { + if v != nil && v.Valid && v.String != "" { + enc.StringKeyOmitEmpty(key, v.String) + } +} + +// SQLNullStringKeyNullEmpty adds a string to be encoded or skips it if it is zero value. 
+// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullStringKeyNullEmpty(key string, v *sql.NullString) { + if v != nil && v.Valid { + enc.StringKeyNullEmpty(key, v.String) + } +} + +// NullInt64 + +// EncodeSQLNullInt64 encodes a string to +func (enc *Encoder) EncodeSQLNullInt64(v *sql.NullInt64) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeInt64(v.Int64) + _, err := enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +// AddSQLNullInt64 adds a string to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullInt64(v *sql.NullInt64) { + enc.Int64(v.Int64) +} + +// AddSQLNullInt64OmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullInt64OmitEmpty(v *sql.NullInt64) { + if v != nil && v.Valid && v.Int64 != 0 { + enc.Int64OmitEmpty(v.Int64) + } +} + +// AddSQLNullInt64NullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullInt64NullEmpty(v *sql.NullInt64) { + if v != nil && v.Valid { + enc.Int64NullEmpty(v.Int64) + } +} + +// AddSQLNullInt64Key adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullInt64Key(key string, v *sql.NullInt64) { + enc.Int64Key(key, v.Int64) +} + +// AddSQLNullInt64KeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullInt64KeyOmitEmpty(key string, v *sql.NullInt64) { + if v != nil && v.Valid && v.Int64 != 0 { + enc.Int64KeyOmitEmpty(key, v.Int64) + } +} + +// AddSQLNullInt64KeyNullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullInt64KeyNullEmpty(key string, v *sql.NullInt64) { + if v != nil && v.Valid { + enc.Int64KeyNullEmpty(key, v.Int64) + } +} + +// SQLNullInt64 adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullInt64(v *sql.NullInt64) { + enc.Int64(v.Int64) +} + +// SQLNullInt64OmitEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullInt64OmitEmpty(v *sql.NullInt64) { + if v != nil && v.Valid && v.Int64 != 0 { + enc.Int64(v.Int64) + } +} + +// SQLNullInt64NullEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullInt64NullEmpty(v *sql.NullInt64) { + if v != nil && v.Valid { + enc.Int64NullEmpty(v.Int64) + } +} + +// SQLNullInt64Key adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullInt64Key(key string, v *sql.NullInt64) { + enc.Int64Key(key, v.Int64) +} + +// SQLNullInt64KeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullInt64KeyOmitEmpty(key string, v *sql.NullInt64) { + if v != nil && v.Valid && v.Int64 != 0 { + enc.Int64KeyOmitEmpty(key, v.Int64) + } +} + +// SQLNullInt64KeyNullEmpty adds a string to be encoded or skips it if it is zero value. 
+// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullInt64KeyNullEmpty(key string, v *sql.NullInt64) { + if v != nil && v.Valid { + enc.Int64KeyNullEmpty(key, v.Int64) + } +} + +// NullFloat64 + +// EncodeSQLNullFloat64 encodes a string to +func (enc *Encoder) EncodeSQLNullFloat64(v *sql.NullFloat64) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeFloat(v.Float64) + _, err := enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +// AddSQLNullFloat64 adds a string to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullFloat64(v *sql.NullFloat64) { + enc.Float64(v.Float64) +} + +// AddSQLNullFloat64OmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullFloat64OmitEmpty(v *sql.NullFloat64) { + if v != nil && v.Valid && v.Float64 != 0 { + enc.Float64OmitEmpty(v.Float64) + } +} + +// AddSQLNullFloat64NullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullFloat64NullEmpty(v *sql.NullFloat64) { + if v != nil && v.Valid { + enc.Float64NullEmpty(v.Float64) + } +} + +// AddSQLNullFloat64Key adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullFloat64Key(key string, v *sql.NullFloat64) { + enc.Float64Key(key, v.Float64) +} + +// AddSQLNullFloat64KeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullFloat64KeyOmitEmpty(key string, v *sql.NullFloat64) { + if v != nil && v.Valid && v.Float64 != 0 { + enc.Float64KeyOmitEmpty(key, v.Float64) + } +} + +// AddSQLNullFloat64KeyNullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullFloat64KeyNullEmpty(key string, v *sql.NullFloat64) { + if v != nil && v.Valid { + enc.Float64KeyNullEmpty(key, v.Float64) + } +} + +// SQLNullFloat64 adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullFloat64(v *sql.NullFloat64) { + enc.Float64(v.Float64) +} + +// SQLNullFloat64OmitEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullFloat64OmitEmpty(v *sql.NullFloat64) { + if v != nil && v.Valid && v.Float64 != 0 { + enc.Float64(v.Float64) + } +} + +// SQLNullFloat64NullEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullFloat64NullEmpty(v *sql.NullFloat64) { + if v != nil && v.Valid { + enc.Float64NullEmpty(v.Float64) + } +} + +// SQLNullFloat64Key adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullFloat64Key(key string, v *sql.NullFloat64) { + enc.Float64Key(key, v.Float64) +} + +// SQLNullFloat64KeyOmitEmpty adds a string to be encoded or skips it if it is zero value. 
+// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullFloat64KeyOmitEmpty(key string, v *sql.NullFloat64) { + if v != nil && v.Valid && v.Float64 != 0 { + enc.Float64KeyOmitEmpty(key, v.Float64) + } +} + +// SQLNullFloat64KeyNullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullFloat64KeyNullEmpty(key string, v *sql.NullFloat64) { + if v != nil && v.Valid { + enc.Float64KeyNullEmpty(key, v.Float64) + } +} + +// NullBool + +// EncodeSQLNullBool encodes a string to +func (enc *Encoder) EncodeSQLNullBool(v *sql.NullBool) error { + if enc.isPooled == 1 { + panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder")) + } + _, _ = enc.encodeBool(v.Bool) + _, err := enc.Write() + if err != nil { + enc.err = err + return err + } + return nil +} + +// AddSQLNullBool adds a string to be encoded, must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullBool(v *sql.NullBool) { + enc.Bool(v.Bool) +} + +// AddSQLNullBoolOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) AddSQLNullBoolOmitEmpty(v *sql.NullBool) { + if v != nil && v.Valid && v.Bool != false { + enc.BoolOmitEmpty(v.Bool) + } +} + +// AddSQLNullBoolKey adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullBoolKey(key string, v *sql.NullBool) { + enc.BoolKey(key, v.Bool) +} + +// AddSQLNullBoolKeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullBoolKeyOmitEmpty(key string, v *sql.NullBool) { + if v != nil && v.Valid && v.Bool != false { + enc.BoolKeyOmitEmpty(key, v.Bool) + } +} + +// AddSQLNullBoolKeyNullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) AddSQLNullBoolKeyNullEmpty(key string, v *sql.NullBool) { + if v != nil && v.Valid { + enc.BoolKeyNullEmpty(key, v.Bool) + } +} + +// SQLNullBool adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullBool(v *sql.NullBool) { + enc.Bool(v.Bool) +} + +// SQLNullBoolOmitEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullBoolOmitEmpty(v *sql.NullBool) { + if v != nil && v.Valid && v.Bool != false { + enc.Bool(v.Bool) + } +} + +// SQLNullBoolNullEmpty adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullBoolNullEmpty(v *sql.NullBool) { + if v != nil && v.Valid { + enc.BoolNullEmpty(v.Bool) + } +} + +// SQLNullBoolKey adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullBoolKey(key string, v *sql.NullBool) { + enc.BoolKey(key, v.Bool) +} + +// SQLNullBoolKeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullBoolKeyOmitEmpty(key string, v *sql.NullBool) { + if v != nil && v.Valid && v.Bool != false { + enc.BoolKeyOmitEmpty(key, v.Bool) + } +} + +// SQLNullBoolKeyNullEmpty adds a string to be encoded or skips it if it is zero value. 
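Across these sql.Null* helpers the pattern is consistent: the OmitEmpty variants write the field only when Valid is true and the wrapped value is non-zero, while the NullEmpty variants write the field whenever Valid is true, emitting null for the zero value. A hypothetical sketch inside a MarshalJSONObject method (requires the database/sql and gojay imports):

    // row is a hypothetical struct holding nullable database columns.
    type row struct {
        Name  sql.NullString
        Score sql.NullInt64
    }

    func (r *row) MarshalJSONObject(enc *gojay.Encoder) {
        // Written only when Valid and non-empty.
        enc.AddSQLNullStringKeyOmitEmpty("name", &r.Name)
        // Written whenever Valid; a zero Int64 becomes "score":null.
        enc.AddSQLNullInt64KeyNullEmpty("score", &r.Score)
    }

    func (r *row) IsNil() bool { return r == nil }
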
+// Must be used inside an object as it will encode a key +func (enc *Encoder) SQLNullBoolKeyNullEmpty(key string, v *sql.NullBool) { + if v != nil && v.Valid { + enc.BoolKeyNullEmpty(key, v.Bool) + } +} diff --git a/vendor/github.com/francoispqt/gojay/encode_stream.go b/vendor/github.com/francoispqt/gojay/encode_stream.go new file mode 100644 index 0000000..fae8a17 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_stream.go @@ -0,0 +1,205 @@ +package gojay + +import ( + "strconv" + "sync" + "time" +) + +// MarshalerStream is the interface to implement +// to continuously encode of stream of data. +type MarshalerStream interface { + MarshalStream(enc *StreamEncoder) +} + +// A StreamEncoder reads and encodes values to JSON from an input stream. +// +// It implements conext.Context and provide a channel to notify interruption. +type StreamEncoder struct { + mux *sync.RWMutex + *Encoder + nConsumer int + delimiter byte + deadline *time.Time + done chan struct{} +} + +// EncodeStream spins up a defined number of non blocking consumers of the MarshalerStream m. +// +// m must implement MarshalerStream. Ideally m is a channel. See example for implementation. +// +// See the documentation for Marshal for details about the conversion of Go value to JSON. +func (s *StreamEncoder) EncodeStream(m MarshalerStream) { + // if a single consumer, just use this encoder + if s.nConsumer == 1 { + go consume(s, s, m) + return + } + // else use this Encoder only for first consumer + // and use new encoders for other consumers + // this is to avoid concurrent writing to same buffer + // resulting in a weird JSON + go consume(s, s, m) + for i := 1; i < s.nConsumer; i++ { + s.mux.RLock() + select { + case <-s.done: + default: + ss := Stream.borrowEncoder(s.w) + ss.mux.Lock() + ss.done = s.done + ss.buf = make([]byte, 0, 512) + ss.delimiter = s.delimiter + go consume(s, ss, m) + ss.mux.Unlock() + } + s.mux.RUnlock() + } + return +} + +// LineDelimited sets the delimiter to a new line character. +// +// It will add a new line after each JSON marshaled by the MarshalerStream +func (s *StreamEncoder) LineDelimited() *StreamEncoder { + s.delimiter = '\n' + return s +} + +// CommaDelimited sets the delimiter to a comma. +// +// It will add a new line after each JSON marshaled by the MarshalerStream +func (s *StreamEncoder) CommaDelimited() *StreamEncoder { + s.delimiter = ',' + return s +} + +// NConsumer sets the number of non blocking go routine to consume the stream. +func (s *StreamEncoder) NConsumer(n int) *StreamEncoder { + s.nConsumer = n + return s +} + +// Release sends back a Decoder to the pool. +// If a decoder is used after calling Release +// a panic will be raised with an InvalidUsagePooledDecoderError error. +func (s *StreamEncoder) Release() { + s.isPooled = 1 + streamEncPool.Put(s) +} + +// Done returns a channel that's closed when work is done. +// It implements context.Context +func (s *StreamEncoder) Done() <-chan struct{} { + return s.done +} + +// Err returns nil if Done is not yet closed. +// If Done is closed, Err returns a non-nil error explaining why. +// It implements context.Context +func (s *StreamEncoder) Err() error { + return s.err +} + +// Deadline returns the time when work done on behalf of this context +// should be canceled. Deadline returns ok==false when no deadline is +// set. Successive calls to Deadline return the same results. 
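StreamEncoder ties the encoder to a MarshalerStream source and a set of consumer goroutines, and Done, Err and Deadline make it usable as a context. A typical setup streams objects from a channel as line-delimited JSON; the types below are hypothetical and only illustrate the interfaces (requires the io and gojay imports):

    // message is a hypothetical payload; messageStream is its channel source.
    type message struct{ Text string }

    func (m *message) MarshalJSONObject(enc *gojay.Encoder) { enc.AddStringKey("text", m.Text) }
    func (m *message) IsNil() bool                          { return m == nil }

    type messageStream chan *message

    // MarshalStream implements gojay.MarshalerStream: each call pulls one value
    // from the channel, or stops once the stream is done, and encodes it.
    func (c messageStream) MarshalStream(enc *gojay.StreamEncoder) {
        select {
        case <-enc.Done():
            return
        case m := <-c:
            enc.AddObject(m)
        }
    }

    func startStream(w io.Writer, src messageStream) *gojay.StreamEncoder {
        enc := gojay.Stream.NewEncoder(w).NConsumer(4).LineDelimited()
        enc.EncodeStream(src) // returns immediately; four consumers run in their own goroutines
        return enc
    }
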
+func (s *StreamEncoder) Deadline() (time.Time, bool) { + if s.deadline != nil { + return *s.deadline, true + } + return time.Time{}, false +} + +// SetDeadline sets the deadline +func (s *StreamEncoder) SetDeadline(t time.Time) { + s.deadline = &t +} + +// Value implements context.Context +func (s *StreamEncoder) Value(key interface{}) interface{} { + return nil +} + +// Cancel cancels the consumers of the stream, interrupting the stream encoding. +// +// After calling cancel, Done() will return a closed channel. +func (s *StreamEncoder) Cancel(err error) { + s.mux.Lock() + defer s.mux.Unlock() + + select { + case <-s.done: + default: + s.err = err + close(s.done) + } +} + +// AddObject adds an object to be encoded. +// value must implement MarshalerJSONObject. +func (s *StreamEncoder) AddObject(v MarshalerJSONObject) { + if v.IsNil() { + return + } + s.Encoder.writeByte('{') + v.MarshalJSONObject(s.Encoder) + s.Encoder.writeByte('}') + s.Encoder.writeByte(s.delimiter) +} + +// AddString adds a string to be encoded. +func (s *StreamEncoder) AddString(v string) { + s.Encoder.writeByte('"') + s.Encoder.writeString(v) + s.Encoder.writeByte('"') + s.Encoder.writeByte(s.delimiter) +} + +// AddArray adds an implementation of MarshalerJSONArray to be encoded. +func (s *StreamEncoder) AddArray(v MarshalerJSONArray) { + s.Encoder.writeByte('[') + v.MarshalJSONArray(s.Encoder) + s.Encoder.writeByte(']') + s.Encoder.writeByte(s.delimiter) +} + +// AddInt adds an int to be encoded. +func (s *StreamEncoder) AddInt(value int) { + s.buf = strconv.AppendInt(s.buf, int64(value), 10) + s.Encoder.writeByte(s.delimiter) +} + +// AddFloat64 adds a float64 to be encoded. +func (s *StreamEncoder) AddFloat64(value float64) { + s.buf = strconv.AppendFloat(s.buf, value, 'f', -1, 64) + s.Encoder.writeByte(s.delimiter) +} + +// AddFloat adds a float64 to be encoded. +func (s *StreamEncoder) AddFloat(value float64) { + s.AddFloat64(value) +} + +// Non exposed + +func consume(init *StreamEncoder, s *StreamEncoder, m MarshalerStream) { + defer s.Release() + for { + select { + case <-init.Done(): + return + default: + m.MarshalStream(s) + if s.Encoder.err != nil { + init.Cancel(s.Encoder.err) + return + } + i, err := s.Encoder.Write() + if err != nil || i == 0 { + init.Cancel(err) + return + } + } + } +} diff --git a/vendor/github.com/francoispqt/gojay/encode_stream_pool.go b/vendor/github.com/francoispqt/gojay/encode_stream_pool.go new file mode 100644 index 0000000..3bb8b1a --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/encode_stream_pool.go @@ -0,0 +1,38 @@ +package gojay + +import ( + "io" + "sync" +) + +// NewEncoder returns a new StreamEncoder. +// It takes an io.Writer implementation to output data. +// It initiates the done channel returned by Done(). +func (s stream) NewEncoder(w io.Writer) *StreamEncoder { + enc := BorrowEncoder(w) + return &StreamEncoder{Encoder: enc, nConsumer: 1, done: make(chan struct{}, 1), mux: &sync.RWMutex{}} +} + +// BorrowEncoder borrows a StreamEncoder from the pool. +// It takes an io.Writer implementation to output data. +// It initiates the done channel returned by Done(). 
+//
+// If no StreamEncoder is available in the pool, it returns a fresh one
+func (s stream) BorrowEncoder(w io.Writer) *StreamEncoder {
+	streamEnc := streamEncPool.Get().(*StreamEncoder)
+	streamEnc.w = w
+	streamEnc.Encoder.err = nil
+	streamEnc.done = make(chan struct{}, 1)
+	streamEnc.Encoder.buf = streamEnc.buf[:0]
+	streamEnc.nConsumer = 1
+	streamEnc.isPooled = 0
+	return streamEnc
+}
+
+func (s stream) borrowEncoder(w io.Writer) *StreamEncoder {
+	streamEnc := streamEncPool.Get().(*StreamEncoder)
+	streamEnc.isPooled = 0
+	streamEnc.w = w
+	streamEnc.Encoder.err = nil
+	return streamEnc
+}
diff --git a/vendor/github.com/francoispqt/gojay/encode_string.go b/vendor/github.com/francoispqt/gojay/encode_string.go
new file mode 100644
index 0000000..438c773
--- /dev/null
+++ b/vendor/github.com/francoispqt/gojay/encode_string.go
@@ -0,0 +1,186 @@
+package gojay
+
+// EncodeString encodes a string to JSON
+func (enc *Encoder) EncodeString(s string) error {
+	if enc.isPooled == 1 {
+		panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder"))
+	}
+	_, _ = enc.encodeString(s)
+	_, err := enc.Write()
+	if err != nil {
+		enc.err = err
+		return err
+	}
+	return nil
+}
+
+// encodeString encodes a string to JSON
+func (enc *Encoder) encodeString(v string) ([]byte, error) {
+	enc.writeByte('"')
+	enc.writeStringEscape(v)
+	enc.writeByte('"')
+	return enc.buf, nil
+}
+
+// AppendString appends a string to the buffer
+func (enc *Encoder) AppendString(v string) {
+	enc.grow(len(v) + 2)
+	enc.writeByte('"')
+	enc.writeStringEscape(v)
+	enc.writeByte('"')
+}
+
+// AddString adds a string to be encoded, must be used inside a slice or array encoding (does not encode a key)
+func (enc *Encoder) AddString(v string) {
+	enc.String(v)
+}
+
+// AddStringOmitEmpty adds a string to be encoded or skips it if it is zero value.
+// Must be used inside a slice or array encoding (does not encode a key)
+func (enc *Encoder) AddStringOmitEmpty(v string) {
+	enc.StringOmitEmpty(v)
+}
+
+// AddStringNullEmpty adds a string to be encoded or skips it if it is zero value.
+// Must be used inside a slice or array encoding (does not encode a key)
+func (enc *Encoder) AddStringNullEmpty(v string) {
+	enc.StringNullEmpty(v)
+}
+
+// AddStringKey adds a string to be encoded, must be used inside an object as it will encode a key
+func (enc *Encoder) AddStringKey(key, v string) {
+	enc.StringKey(key, v)
+}
+
+// AddStringKeyOmitEmpty adds a string to be encoded or skips it if it is zero value.
+// Must be used inside an object as it will encode a key
+func (enc *Encoder) AddStringKeyOmitEmpty(key, v string) {
+	enc.StringKeyOmitEmpty(key, v)
+}
+
+// AddStringKeyNullEmpty adds a string to be encoded or skips it if it is zero value.
+// Must be used inside an object as it will encode a key
+func (enc *Encoder) AddStringKeyNullEmpty(key, v string) {
+	enc.StringKeyNullEmpty(key, v)
+}
+
+// String adds a string to be encoded, must be used inside a slice or array encoding (does not encode a key)
+func (enc *Encoder) String(v string) {
+	enc.grow(len(v) + 4)
+	r := enc.getPreviousRune()
+	if r != '[' {
+		enc.writeTwoBytes(',', '"')
+	} else {
+		enc.writeByte('"')
+	}
+	enc.writeStringEscape(v)
+	enc.writeByte('"')
+}
+
+// StringOmitEmpty adds a string to be encoded or skips it if it is zero value.
+// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) StringOmitEmpty(v string) { + if v == "" { + return + } + r := enc.getPreviousRune() + if r != '[' { + enc.writeTwoBytes(',', '"') + } else { + enc.writeByte('"') + } + enc.writeStringEscape(v) + enc.writeByte('"') +} + +// StringNullEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside a slice or array encoding (does not encode a key) +func (enc *Encoder) StringNullEmpty(v string) { + r := enc.getPreviousRune() + if v == "" { + if r != '[' { + enc.writeByte(',') + enc.writeBytes(nullBytes) + } else { + enc.writeBytes(nullBytes) + } + return + } + if r != '[' { + enc.writeTwoBytes(',', '"') + } else { + enc.writeByte('"') + } + enc.writeStringEscape(v) + enc.writeByte('"') +} + +// StringKey adds a string to be encoded, must be used inside an object as it will encode a key +func (enc *Encoder) StringKey(key, v string) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + enc.grow(len(key) + len(v) + 5) + r := enc.getPreviousRune() + if r != '{' { + enc.writeTwoBytes(',', '"') + } else { + enc.writeByte('"') + } + enc.writeStringEscape(key) + enc.writeBytes(objKeyStr) + enc.writeStringEscape(v) + enc.writeByte('"') +} + +// StringKeyOmitEmpty adds a string to be encoded or skips it if it is zero value. +// Must be used inside an object as it will encode a key +func (enc *Encoder) StringKeyOmitEmpty(key, v string) { + if enc.hasKeys { + if !enc.keyExists(key) { + return + } + } + if v == "" { + return + } + enc.grow(len(key) + len(v) + 5) + r := enc.getPreviousRune() + if r != '{' { + enc.writeTwoBytes(',', '"') + } else { + enc.writeByte('"') + } + enc.writeStringEscape(key) + enc.writeBytes(objKeyStr) + enc.writeStringEscape(v) + enc.writeByte('"') +} + +// StringKeyNullEmpty adds a string to be encoded or skips it if it is zero value. 
+// Must be used inside an object as it will encode a key
+func (enc *Encoder) StringKeyNullEmpty(key, v string) {
+	if enc.hasKeys {
+		if !enc.keyExists(key) {
+			return
+		}
+	}
+	enc.grow(len(key) + len(v) + 5)
+	r := enc.getPreviousRune()
+	if r != '{' {
+		enc.writeTwoBytes(',', '"')
+	} else {
+		enc.writeByte('"')
+	}
+	enc.writeStringEscape(key)
+	enc.writeBytes(objKey)
+	if v == "" {
+		enc.writeBytes(nullBytes)
+		return
+	}
+	enc.writeByte('"')
+	enc.writeStringEscape(v)
+	enc.writeByte('"')
+}
diff --git a/vendor/github.com/francoispqt/gojay/encode_time.go b/vendor/github.com/francoispqt/gojay/encode_time.go
new file mode 100644
index 0000000..6f99e34
--- /dev/null
+++ b/vendor/github.com/francoispqt/gojay/encode_time.go
@@ -0,0 +1,68 @@
+package gojay
+
+import (
+	"time"
+)
+
+// EncodeTime encodes a *time.Time to JSON with the given format
+func (enc *Encoder) EncodeTime(t *time.Time, format string) error {
+	if enc.isPooled == 1 {
+		panic(InvalidUsagePooledEncoderError("Invalid usage of pooled encoder"))
+	}
+	_, _ = enc.encodeTime(t, format)
+	_, err := enc.Write()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// encodeTime encodes a *time.Time to JSON with the given format
+func (enc *Encoder) encodeTime(t *time.Time, format string) ([]byte, error) {
+	enc.writeByte('"')
+	enc.buf = t.AppendFormat(enc.buf, format)
+	enc.writeByte('"')
+	return enc.buf, nil
+}
+
+// AddTimeKey adds a *time.Time to be encoded with the given format, must be used inside an object as it will encode a key
+func (enc *Encoder) AddTimeKey(key string, t *time.Time, format string) {
+	enc.TimeKey(key, t, format)
+}
+
+// TimeKey adds a *time.Time to be encoded with the given format, must be used inside an object as it will encode a key
+func (enc *Encoder) TimeKey(key string, t *time.Time, format string) {
+	if enc.hasKeys {
+		if !enc.keyExists(key) {
+			return
+		}
+	}
+	enc.grow(10 + len(key))
+	r := enc.getPreviousRune()
+	if r != '{' {
+		enc.writeTwoBytes(',', '"')
+	} else {
+		enc.writeByte('"')
+	}
+	enc.writeStringEscape(key)
+	enc.writeBytes(objKeyStr)
+	enc.buf = t.AppendFormat(enc.buf, format)
+	enc.writeByte('"')
+}
+
+// AddTime adds a *time.Time to be encoded with the given format, must be used inside a slice or array encoding (does not encode a key)
+func (enc *Encoder) AddTime(t *time.Time, format string) {
+	enc.Time(t, format)
+}
+
+// Time adds a *time.Time to be encoded with the given format, must be used inside a slice or array encoding (does not encode a key)
+func (enc *Encoder) Time(t *time.Time, format string) {
+	enc.grow(10)
+	r := enc.getPreviousRune()
+	if r != '[' {
+		enc.writeByte(',')
+	}
+	enc.writeByte('"')
+	enc.buf = t.AppendFormat(enc.buf, format)
+	enc.writeByte('"')
+}
diff --git a/vendor/github.com/francoispqt/gojay/errors.go b/vendor/github.com/francoispqt/gojay/errors.go
new file mode 100644
index 0000000..0fd52e6
--- /dev/null
+++ b/vendor/github.com/francoispqt/gojay/errors.go
@@ -0,0 +1,88 @@
+package gojay
+
+import (
+	"errors"
+	"fmt"
+)
+
+const invalidJSONCharErrorMsg = "Invalid JSON, wrong char '%c' found at position %d"
+
+// InvalidJSONError is a type representing an error returned when
+// Decoding encounters invalid JSON.
+type InvalidJSONError string
+
+func (err InvalidJSONError) Error() string {
+	return string(err)
+}
+
+func (dec *Decoder) raiseInvalidJSONErr(pos int) error {
+	var c byte
+	if len(dec.data) > pos {
+		c = dec.data[pos]
+	}
+	dec.err = InvalidJSONError(
+		fmt.Sprintf(
+			invalidJSONCharErrorMsg,
+			c,
+			pos,
+		),
+	)
+	return dec.err
+}
+
+const invalidUnmarshalErrorMsg = "Cannot unmarshal JSON to type '%T'"
+
+// InvalidUnmarshalError is a type representing an error returned when
+// Decoding cannot unmarshal JSON to the receiver type for various reasons.
+type InvalidUnmarshalError string
+
+func (err InvalidUnmarshalError) Error() string {
+	return string(err)
+}
+
+func (dec *Decoder) makeInvalidUnmarshalErr(v interface{}) error {
+	return InvalidUnmarshalError(
+		fmt.Sprintf(
+			invalidUnmarshalErrorMsg,
+			v,
+		),
+	)
+}
+
+const invalidMarshalErrorMsg = "Invalid type %T provided to Marshal"
+
+// InvalidMarshalError is a type representing an error returned when
+// Encoding cannot determine how to encode the given type
+type InvalidMarshalError string
+
+func (err InvalidMarshalError) Error() string {
+	return string(err)
+}
+
+// NoReaderError is a type representing an error returned when
+// decoding requires a reader and none was given
+type NoReaderError string
+
+func (err NoReaderError) Error() string {
+	return string(err)
+}
+
+// InvalidUsagePooledDecoderError is a type representing an error returned
+// when decoding is called on a still pooled Decoder
+type InvalidUsagePooledDecoderError string
+
+func (err InvalidUsagePooledDecoderError) Error() string {
+	return string(err)
+}
+
+// InvalidUsagePooledEncoderError is a type representing an error returned
+// when encoding is called on a still pooled Encoder
+type InvalidUsagePooledEncoderError string
+
+func (err InvalidUsagePooledEncoderError) Error() string {
+	return string(err)
+}
+
+// ErrUnmarshalPtrExpected is the error returned when unmarshal expects a pointer value,
+// for example when using `dec.ObjectNull` or `dec.ArrayNull`.
+var ErrUnmarshalPtrExpected = errors.New("Cannot unmarshal to given value, a pointer is expected") diff --git a/vendor/github.com/francoispqt/gojay/go.mod b/vendor/github.com/francoispqt/gojay/go.mod new file mode 100644 index 0000000..76814eb --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/go.mod @@ -0,0 +1,24 @@ +module github.com/francoispqt/gojay + +go 1.12 + +require ( + cloud.google.com/go v0.37.0 // indirect + github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 + github.com/go-errors/errors v1.0.1 + github.com/golang/protobuf v1.3.1 // indirect + github.com/json-iterator/go v1.1.6 + github.com/lunixbochs/vtclean v1.0.0 // indirect + github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/pkg/errors v0.8.1 // indirect + github.com/stretchr/testify v1.2.2 + github.com/viant/assertly v0.4.8 + github.com/viant/toolbox v0.24.0 + golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a // indirect + golang.org/x/net v0.0.0-20190313220215-9f648a60d977 + golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 // indirect + golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect +) diff --git a/vendor/github.com/francoispqt/gojay/go.sum b/vendor/github.com/francoispqt/gojay/go.sum new file mode 100644 index 0000000..06c2749 --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/go.sum @@ -0,0 +1,182 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0 h1:69FNAINiZfsEuwH3fKq8QrAAnHz+2m4XL4kVYi5BX0Q= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lunixbochs/vtclean v1.0.0 
h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= 
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8 h1:5x1GzBaRteIwTr5RAGFVG14uNeRFxVNbXPWrK2qAgpc= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0 h1:6TteTDQ68CjgcCe8wH3D3ZhUQQOJXMTbj/D9rkk2a1k= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a h1:YX8ljsm6wXlHZO+aRz9Exqr0evNhKRNe5K/gi+zKh4U= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977 h1:actzWV6iWn3GLqN8dZjzsB+CLt+gaV2+wsxroxiQI8I= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f h1:yCrMx/EeIue0+Qca57bWZS7VX6ymEoypmhWyPhz0NHM= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/api 
v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/francoispqt/gojay/gojay.go b/vendor/github.com/francoispqt/gojay/gojay.go new file mode 100644 index 0000000..d0c542f --- /dev/null +++ b/vendor/github.com/francoispqt/gojay/gojay.go @@ -0,0 +1,10 @@ +// Package gojay implements encoding and decoding of JSON as defined in RFC 7159. +// The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// It aims at performance and usability by relying on simple interfaces +// to decode and encode structures, slices, arrays and even channels. 
+// +// On top of the simple interfaces to implement, gojay provides lots of helpers to decode and encode +// multiple of different types natively such as bit.Int, sql.NullString or time.Time +package gojay diff --git a/vendor/github.com/francoispqt/gojay/gojay.png b/vendor/github.com/francoispqt/gojay/gojay.png new file mode 100644 index 0000000000000000000000000000000000000000..21090bdd20877f745003b24a4e8667f57b67a1a3 GIT binary patch literal 44163 zcmeEthd6l??>n)c?&Eu7 z!-#n95jDI@a%%7|H6hC~Q+-SO$%^j!8=_AS6xRJWt4tdT z^@(||c)umOL2(}cj;I2Wy6zVevR-n_8Z|tnxgDGl5rG|_!oDABUXu0bJ0W<*M)rTd z|8EB_cL0KJ8xDx}j4bUif5S~%2qzPil3B~e zd${#dTkH&s4GuY#4)O8wj;da{j9EoO5luv!@YNpStJf}e{G0f$w$cCDIl4yL8gtYy z9DS$EE+|Yn?K>UO1M1gLBQ5C(Dci{EiT0nAj|Dupj`9_M5Yi*-D}2@ZP5kT2J20I2 zUz>1s{cG_Wp0LLHtbnHJ>q~VHhQvM89rMYgB?bB)OS_?{S4Ro#sxLN z?%DNjO9siOIK0?z+9a6$=PQGe4y6P;4mPwf z49&7EWy;!HsUfJ!OX4O3342hr3A@B!zYZSq0i7cZQR9>D66@{nuJ+y|*)PoFIIDk2 zT3=1|fByR~0~k*Nz1*mgYgC+=Y0SWIK*gv(tbHL1JU&_dC!fmEKXVJUwYABI=WVhNZe*_?pdSR=HGD35(<(~dC?sis0~|33T>JNPmm}W za4-|K4cjxsmtu?+v_f_*d72QZ3xN)0{i{-J7&I<`tNKOicmEAojI6UZz1tBW4 zEmOpU0A5Ax#)QZuo7IP$i>IbZwyvU7tDRlrCuvg{MgOpz2*ZPVaon)`Jj!7ffBTb$;Ux(93=-i1FJ}Z?bMkzJhJs-M_SgT~8kNNjl!wXLJXh(SJTEV= zTmr}CwN?JQwr8eWw{E%kU(J*9_=zjfR|~g@&V_MH!%PH&&Gt6~BE2i~2l70?m5ZHd`&N)|pG}eZe0v zWPHWx%{AKjtZqMkJ^h6OT!F`5*w=TC>YYkKEWXFqNu%_U!K8{%D;+d?eX!GL?Iu#J zs_fM@2m}KwbRWM$#!W&1yHdkDoNy0B9Fbe2$N72e6OX=_{|F|)&nKUKXuGjuVzj#F zFT}&ghv(|x1c?C30#|-0a+!cbid8}AHSNMMxvg#9@HN}%jgnOXSXN|V`yMkfTgR;% zl`8gd9clfC&c#t(0k9j_2k*RBe|K+WKLQ-(c`q)6lzQ0cpscX)8RtWxVgGn1dJni! z770Y*QH5M+ z0(9^C;1bkK@)(JHs8*EYFvKlkzjxSG^0^2%NvvSEH1iMAg4~`71UF}(&90Gz3Tcjy zRA2Pb+9QkE*9!dD9jVu-&wUqkJ+&0~eCRfS1A9E}d7kG?Baj+zU5NJBID>L8|e%@Vd( z(~XZFJ-T08QL!D&QQu4w6?g@qohs=xQCuqGS|QpQjkM<#t(s7g#`@{9QTx)*CoBE> zl10Z+x$!%Oxp>iJ7(Kcxf(tCtT}769q0pnedFKtze-8h$p9ZCT=ENFnsHTqolP zP=F-}H9*c!LAwyYU9Rtg%*KpnyvmIjRiTJM91LzC5XG2-bVc`7FlF?ekk2m)Gsptw2 zugLt@jjx@-a0CNIB~wNRsgbC#1p=34b@EnKmWP6XoOR# z9buu|Sq;)j)^T)wW&W&$_CK50TV!lh3MylN)F8~S>LAA7Q`W_uV!c;wGbm7(%bg7_ zGVQnAWUrj6xTk^t?AW(_ELR*& zBDV8ocqH|Glx<_u+i@S@?%a%v>ZN;LH*ek~ey3Dg-=xWw69JvhfaG`0zN-l&zhEvz zh<-$-K5w4X=s~(a$ORYOxH-A(7H7PxdL;Rvf-3E4TyAdewQ$Qu4P<|9y9v$U??$X1 z)pe0_dGK}`Y&q7jT50@3f^KF?zMsT-W7lJQfka}R+EVdfI}8YOlg`X`5s!;wIoF(^0R}8st4ErGh&Js0R9#^3!-pM7`3`0dhNCLb+vMch5H@Q1kMUA*Sd5pH zlaFUTgt`t%!%n+!sj%m^i({aqlHVyvjR`aB!zuK#kgcf&+4?j5Hz~X$#i7QX!-~5vrR`IrwfNPR9bcKe*f2Sc=Ot0e6oWY< zW$pk|YU`;r!DguqRD=zu6I3uNw(>7_C1y%Oa#Ota*ij$S-Fd5>rM+AawvGZXMvTlC z>nlN$JEu0^7Kw34u@l{ih`sOaY~MBPEgK>5@UG&loaJHya+e3CZ##`aUvnd&jokC~ z9Ju9su8fq2-6g-^%;9L6xp+_33=$OjFtAwK~;y9af_PWUgqueu`H^;?pXo zzvJ-nx_CbDYf2s+6}977iWl`T=1{X-tX34~T*`nUzDJiw7fW6PZ<`gza8V&qYO@)~P|G6k@jaS;I-TA}7a6lhVcn zVqORw)jnK3tD8-t1bWumQKm zP~b~!IngISkQ$lS*yrhBwbZRgQ2qLx(*sCeWQPNCMR*L#Pn-EG{$!8%BQY}bM5j66 z>w9ii`L>0BX{|VJw%_DOzEHrMgR)3_N|aCJGIPsJOT|5}r_@F7UA``&_JFtTUdj^^fkTaC8h*FT8`Ov*8&dXg!Rt2*4f* zf-G$ZcAIV8dar>mo||Tn*Y+OVzaV89w~@QfxpEJ6!IPJNbat+td)w*GvQn}nYhjc8 z0SHXfF5CrMPSn9;t~(Gd!LyurBT3)C-*0~2J|H1{*`d8JWqTc-KF^Q)OUYIgUcPZV z=b&n|9H8Es|FZ5@IzEuNY7L0m zPL=O+y?%vaQb*dcSC&sVWghQ=CULI8XY>4_nn!shcyB(GFGot_*$}s*N!A1?VL_Fn zV^A$qwkh0&UV$CnL^zwyPW&(`RgVO>G9jYL2mpxbu(4CKT(}>~S3CjOX64ruHR7bwaHR|Cmk(V()pa-tR1kw}B3mXetSzV9UiF za71>xDFM9)60lv;Ej%b&!E|e9+I1_fKYsjJgI$HWX;6LT z6a8U|8~>dw^v5l{g5=n8?D%{9Z&k!95jEYpcwtt4;LBk|l2z)l62Kv)w4W?ttaHI< zQm%=%r$JQjY-JrVgem>tftyYv^sRXEmxsyWa5Y8L}n1a(((2ug} zJtI32xjg<*Y=P9uhn3u2<&dEfx>8FW%dI{_z;&W?}sR8 zRzhOuJZ$CCv0!)R3d#2gICx)NXgQ^?A3ryzn4qSks~OolBV>HH*nLMewQ7`3`l*e) 
zT}{}Q-P);roA5TowER2Z8LkJC$&Z)1^WoFsa`xIi*@2V}CMf*Ijhi+%XqRkn(E57DzExtpPuTexxr?==2dy;In;_S8ns!BustiCVl(QyFlUz9-cE2?ZIXb4?K?y!~ z;i$)ZtS#w*&N59pGgo)_g(%BfjWKZMbE##^mxQ?w2ruy zCUfU7<7B8>KU6!@scJ=LNl*ZM25#K%k6pI_R{E{3QzHYQSJI;zw)F7uFyylT8k4P}Y!l{%e9_C(V9R{OUu7MTZqcGp&iIoLaH|jr>v#Zl z$Sj!P>8tfDw{wzUI2PTY z9exh~Rxcjy_QpS=oUT9Qq1Q^e3`T1qbV3vxgWDrU7w^&E0(?&l^!0C~<{yEk!1?(n{z>{X*W^w zDAu04{91Zy=>RjbSN_ptue3TBXcN9iA1*?oAl)qh-%aKKw2p;sF#O5qw=+IgSPFqU z0zPqS_Ljep+b`=gAG^(!20ia8)4Zw;Lbk@ocjK_GdTOQjt}f8E37Z}GH`QSC{jMoW zbABuS$j89fgskk7l=+x&^GTyaiqh4#XZ<+sZ{z;TNI+>cyM%@pfFPQQ*Ll{c
  • @ zdYQ4oTFdr*Wtf>~$Xg^?y-0*Rgrfz*Eq`P4$93F`{f2b&C%dowfbain8d><~nR)J} z1^7f^|KY<>UOv9}RimnaZY>>^f@eFM81X#0ew9Q$(mttkfyYT83i=}2xvOGdtmlhL zNzhA4tErj!k2@(Aa-;c}KdTcOrWLy^_9I>zidN_KOGKB!-rC{!yPiIlxnfo>@gA+r z`Fo2SH-;2*c=TY9fDCTuQln)6(LY2M)CxL5t~_}xH#WmLmUY?ynMOvu4JmHU_C4Q- znpMUyggk!u@S%L(@Czu`YLOGh6q#bGv=Ip@e&re^JWNF2voB5ROB4UQmiROd^zKcW z*iACyVWYL7f_2A+3R;2y5Ko=B737Wm&!$to&p+_LAw+?MeSp|uM()0|F*K+qU}q&X zi1x5Oe~o7OahE^Xw0>q8QQ*|cf)sm)F8ju6&BaeDJpQlst}*|^0Zy((Qz$T;mo8Fo z+g6JiuOZO;lB|P3-cBgd+IouIe+(_an2rNflqyrOn|)wO zO7&B}IOPTQ=pjRhq~TbXdXTqt?>As2$^V0;llkiv%m_zQL9eC1SAops?P|**heDUx zEJppaYv~o&A)l47V^q6^(&ebYjuWzB(GyZpwYt!09__&pSr|i*KY)2hy>+6XgOeoB zem3OH) z%P)psCL|<$fcrE5$)KE6Kpq$;?lHAffW^`5u}Q#u+a;@#C7UEQxb19WR|wc>TIKgS z>S~Yf(?YBe?~6z+q$$wl%GT{i$j>cuGETJ z$9sACJUf_D{c!UwsVWn;L*#%}s>fHA7Qz5NWJ&9R6xUz$NekkBSsdK~&dihTyv zjBGXY;_Q&S_fTPpHsXdV^sKBi@6mniIXl#EmVXz`IjN(Un*k_KF^x!*wAxkUSpAz8 z{8ld|(7PnpSrKnq0GeqM2w#nd{TkOu&D!>FGPuw3W0G&b<-f&%@G|`FJI``~P*5{L z2dgs@jp>I?TH>oq!l)m(-dH<@zs?VXEP|$SMJk=2TA9k@Q@U_T3;o8#H3Yt9PEA=k zIXS{V9d?caBS=EOI-%{UQ41eKHu$aTrCk??eNyAHeD36oxY5>#>~sK_SpFRy7JLIm z31!7gRoyD%dI~zm9$d}0n|smdQ7D;>*00CL%zx#Ti<2BRuYB?B}Qb{H^u*ZJAms-Ar|eq4|<$nloN6MZEz5vUlm;*+{k2*(0P6 zCC}aKq*|pazakJjrfvfML|f!2V}QPeE*E03c4`VONfx*+_NGa(k%XPna?}@3oUE$Z z2D2!*@OO6X`!}iZ<(1hk#ubvwQoLp=k(n@I+6Pg6bo0rkSLb3pSLbVATkfO{&+R=~ zz~Q-)5H4@16*0Z`ws_5BxtMlYpxLJE*Zu>^3AFwTra>l9>s5U9rqbQ4ShiQq+9L8) z3cklGW_hF`4WjYnpIt;r*3W z)$9&f5@HdTJSyu&NlTdj)>!Vh`=Q6*QCUKyZVad&0MoX4Z%#A6|J+DJLqk`)*n+ZRb87ND0+K{7@HXyrSI8IjA58my+A)J+T$-f0Jy zJ-|v~0JK2_5K?TsqU6eF+jz$CxGS@QudC$=AeupS5Er#}H$Ng;0+KkBkJxRSf8_D? zBp-cW{I6ZiCnZ71r%m{L_4g~zkr(}d;HaW(AgYN-gkLnfJ+;~yO zP`q@~0@_{|5MSCI?h~fd3kt}E>H04mt5MtuxUR$ zC}DL|H*alG;H$MsK1sh(b+n^b7E~Nsn+vw2Kw#no>=jM1_|hS9q!{HBjSS|j;s8#rSOvU7YysT-QT>-D{jAf{;Oz>K1$xn` zDtR4+3Bj1P!_m)?(xfWVn&75V347f4KDiQZ2|!o$_pE8L-no1ST3DA`dmtUk7{CxJ z8WFnHwgIoyjNZHa=M->F<*qL#+|Z_9({#b(8s8g=>XieOZN7w028VObH%{ZmwV9fN z&%*6Yo7-R(AD!{mM(Te+I2gbO-$PfIewG`pMcMXdG>eY@Oi7Sjbb>mi#($c)|F0LI z-~orfo4e}I^g)gHU&2ZSP@npb9#%V>zao=Nbr{O;H3!gvm2^vD><=Ip0UF`QV_qv; z1|J9(IFz!Iy1nvPVJzL-17uzCZ6BlTFcknDL8bTzJ+tlH6k zX8@Y23DLx`IY{I39+7@L@Ts>t&jJ(Cbpy#DP+s^cTmHME#lNoxb>7WO1kx&g05|#q z0NHC2wLg!aR-RQEJbfCBdVE(8`h-0a#aDY6Srb?AJFz1&31&M#&2{2jV{85NG0)WO zz}5PKwr@ai$RqRkXWkYID+6^pnlC|1wK1#CC`gP^FWyz5|bCv{PO;{)*5`5=(XZ;7#7~!4pNq3G%sCI z0xs|gpq}f*4RSzYAy(8<`F2wDK3=yRaOMQ>()|D;1>{G1V#QN<45}>e;_jh8R zNhgj5KEX2j5<4TIxsw@l6b&Fa3($!ETWpa|`hmX73A&N(2gMT4!`Q?`l*XKo1HqD1 z$^Pb8*lu*-fRbJN*uCH;Sv?^4h@MRUXzm4!e$(AIM^4bUSk+{7c$K#Zl{J1}!f1^G z7JonKn(d-b&GVzVW_qs}=3&RGqDMO{T2`S>Fi)2*`!#)39U=5Md1n zTScWU$xzaf%V7ZY>3G?s7$$w4-H&>1-*~3E$w#rvcGoa&c zNiyO?MuGgoO4$_>ObsL%Y6QlIR=y@-(&6Q+jO1ir2=ygx$il?LM4!m>ztLY`u#$K$ z2XM8%h0+h}xY`MxvVl_fkCAt~$^rs5)sBD3+ty5iKMyzzN2p6acBB#O_|5xs| zkwaP~;ziMDJuAu_J`uV7X>6_3gM}e{5J2}1NYGpGtT0|m8l0GAQIR6Q+pKESO#fZ} z=GI$oq9_W=wrBqVt*1NPf7kVM8c^0;NT|&Qn}tE%^WaDiH&%|7)^{>R@lv^M0z&?! 
zetOfwp8qCCPC&XIi1>VlshCf@6 z3M|2y%|U%$T72=kN$b;1Cw4{mszR}@3M1atN>&}4(uj+t1Cp%a8OP(k>n)P-RvV-k zyL8>I7xV94&Ga%5N>tQ?cRA{nc*F@58ao&KlzF`5A$i5k*36lxO&?Ls9Ww|4^M&FO zJ2v2grQEL4vFFNZo;$|Kn-kA+LL>+w;Qgrlt%0EP0MhEO(dPrzf^4@L5W20nWl0y$)VJhNVxyVksRKH;@u3ioCkc{ z5H65gvHWK+XlrFXr}7Q*xX61WFAo$HntspKIs)Mlv0lO$!ImQ)i^&5s$&lElcTlg`)depDjhC+I!X|M|ZO0CsOjV_-xOEU}>XLIZ;K*lQJK<6?F=}MJIgJ4ME$>D8l5S z*+bALaDGVVvh^17mUcCtF?T$wK-<%!NGkvbQscApN?GgY4ECrlMebQ8{zMPJwQ({@ zF<$SV@)N)agL%BZZ3lZz=SbTQ7){CCEc^GvGTFrFViTb_;-jOsDMYJ)7+|O+5&trt?hwgyV&SlNzXY>dqF8wZg!R7jvdz>I^?P6-j)z zR{c&^#ZV{L#`-k9L${WoEP3lofJQnpns)}v4olUFim{T!*~?GNIQ)SjBndKE{k>M8 zG#ZR|8Ca#P=u@&60b#GrnZrmuA>a`@ww4}hOMBe#grZ(|)ze^!bjQ|-jCxigETJUs*a|%6;hwLp4u-G{>q6z z{?A0W`$>#&=nHz(cE_LZX3S0ef)Y=u2ay~Tb0$)xeJr43_I>m~M@Tm#Ru})8*1kx7 z4)@l9gJJ&JD#vQ?gMoAl*0}|AGpWAX)QbPUUZTxSRdELAd0^_H#KI+DIkp zl9 z?eKT~d2r-RBYCSQgt>L_^kNMHKf-~8uOwdMvsDC?gr}DZ^$j{Hiw-oXh zE=2C;7pGuqdz*N893vo?>@;L#BNCSy>a|w@WVMfQ$I6+zbeog z{-)}@3)JJaHOWthR}|DrhyG^AB0X=SSV_vq-B9{G*$I)yG7x84HNtGasPj`+YW{fd zNOv%N4*QbNH2v&G5lakaM^N^3^xsO- z|5?iN>YNaDMv=5#=jFR-j5{?8_D{m(KD|;J19IVb?1c|-BkcK?$C+I5%Xq_+d~|S4 z$LZ`AiY3n1(9I+BRsC36HTzn5CdxcRAmHlR@%(UFYvcXRV6U!QRC+E& zF2Ch5&yyv$T!2u`W7T=b6aC8az*3AZTvY2eOPZZa4*gcr`vGOQ`%=I7SOgHRya-mx zS$oJ7Iz->aV&A7NAPeLxA`6*XwQTokbR-e3uum+l;ox9P-%myt=E+%t{&Ok$$dSXJ zLr(4-56o)X#vU2%9Tq8C=&XiwWMN_AKpu&k0IFmEdJD1R7?V|vR>5ZP=nqoAiL*S7Yp(np&8g=3O31KCIdrKK2@ve-lY2QZ1uEq zI1f6$n`fr_=+Ux{nc1{sjq~@MtcDt@pj(loF5}y6L4`(Mr3t$IQCSkq!7UfoZNhJ0 zxl+cz&J%$M?4+aCXHe+=!~H)cmr9)0VGd>`lYF+U{`{%a89BW7BX(voP`G7VrsFI69_r3b!^$s0O@QI=I1ZuvM;~Ke)-`?Kdge5~0V~f+(&O%yh zYCpJgI1_YLVE5l)G2A(%Rip8liALlWB8tu3<^Vh-dP&CM_Et4+-dcRhC|2kFLyOS*v=-*s0=m&d$YNP}zxBtEQH|H)_UROfC7G={o z=^a}tNEKA*)f;zsYxw^>grvc$ojTV@0T`b70v^vq^U&*p)-jp0*+ zo(eR&?@o?N}LROjs zfJU2VBHgjoI=gqBYQ!j$S;=wM%Fr9*L%X8CiLgBJ9 zd82uWD6FKplk8{lv7*bT_*0O#_`4wQ9ElWyEnl&agYQ-GuTl3@gyXBver`-PguFre zp9Wu?1h<4V&TPtGX&uDLz}cxn-j53S=D2~Nrd!POkyG~%4M(hh=_N*bMh(@N*rhD) zm5Q)hsSRij$q+q(k8Oc) zIx0%C!TMfQ4uS0^0i4EX{$dJ5rUKFrH;KW@U+(8~Gk?3tCo>A5+RA>#?WEtkj~9;q zfE@VXRK@&z^+pJ~9RWCCPncV>NBx(o@^{g~>ZYAKpY;JKSu`67wy>V`0QFuHwYjEH5v;>gb-i^QvgON*{p@@q1qAQll6-vi zUm>uKzC9J!kavHW8QYq@1z{@;;-@FixKJM8D2A)Ej8G(GMl$Q984{v#rGC_3-k@{e z_%#XKMaZ}(PZb&8QC6`@)8YB6?;^5*@ekFm<|m9j5#Jdn$H$!+vAWw1A}FlUTGBDs zuZYn2SM{X@CX33$nMY_$dRI<%(Y5-N=NS^Ie>%~v#N*;O+I4F z8-4>0UF8|6gu1qlM0ilOA0<4@hbKl@b7y^YyQXuvTHJPHc9ptDW%+YweQZnOBK4rw z^FmAdxe9r%H>%^|A3Ktj5_IKC_o$&dfCpTm44cKeK7D^@;xk5Dxw7Ufs}esY0tc^N ztSwbm@g*Mi)>meCMivSKTKYREHJwIkrdSYq^A)1-J#Ljkpl^%g-D zi(c~X(r{CW+^l7X6%oh&x36*{JsP}>RFO?sKS2}?Y<&Xn3hw^ZEDAVtcGX5!Jkb2T zwF0KU;*7C=QMw;nD@x{{oipEvc|Zhg<+CEV& z?s}-;e1E%iUqi+fB_X;^6$ zo?9kILBh&_*1AXe7fioaDlg%hIf#@!W zDuB`B0HTwbKT06XXDdsIpmeMxP;S_fo-sCtGY{7` zW5mbGr&FZH^S>?ps~j?lklr>2`B(U1?(nD^q4navj#{|sP?ozkV0jwn(a%*NCG1hG zTz-fI3a}vEb44#~!FB15iE9M{s{sIMb;)S=etf?aq!6pS8lts4(uO?>hDd=)!}OYk z6Bq*sn45{!oE#5DD@##y2jT+lKnL#wIuFwFDS8iSX^^Y?pe-&`4DQqiUoG$Y_iuak z-*Q;%(oRzsj%LYwZFe2ekhi}*6JX0>$DP7IR)2az;G2ofJbKYxvNLfzITZ3N?``rf z%Qy)zovij4YfA60%X^3ZE={;Nl`a!O}=8g62Ya8nSNoWhI)^iq!9a-425M`x~`YW*x$)8=#^kPOS#p`y*By<59 z+rUHZJ?6hfOXM(sxiYAcOXd0v1zHpdiah!Xu`{hgu~HgecI|l+U~hepB{5SbGKOV3;%YffqOAb z+MqvZ6u#p>nC(1o3jWb#z~jY)df4}eP?n`%I@}W3QRP}fwXExKkp@+6Ah#g04u@S@%3(7 z6EViO?e?wec*WJ6rdSmeh3TIUk7?qc%AGSqm1~mgaOlF_mSM)NarYy44Zn>kh!!^vsYEo(V>N-ODw%!(bu5tun_}_FO_U0V zlUPTSV*r*iIdeh?l6kF~U7zEFZ?@(^y6ME~e{k|J224D1n=@fb zhWEM_`VQ_2YMeHeb#!!`jizjefK;x?ah&9FoKzETCM%=cpOG|uBnL@A$ySC>%PPZ< zE%EzO9Q8yBO9k3%{w2S=%u091p?lJ2y|SABqCw6HL+<8y_%n0RvvG~`?>d+DH7OCb z-=>|OsrH^cVi9<7(NqS?6>zQ=i_++2;+~4{Y6yFJ 
z(8F+YFnq_!g4$r7z%=gE64K(~s{xF)5%&fW--3CrQ?@OS@e)rh{5yU6eq{JHaf1`p zHxA@iMY~1&EB=bL=Fdz22-n?SafIY0ZA9~UGR1+8q?hbRi=qOMPd@jiSW2NhF&krR z$0g+9l+;Cl#Jc$H-{Wqa=DeB@7(Gb}$J21oGf06)x$sE8cSL}(5MHKzd!uMI4)|Z{ z+hylpWh6-EN3HFSJ(a7ipOVx_2NKO;zOV8)##pp+^aA$yx#6hfPXY$Io(gwh*IJNS zCg_d}GFpiMF&X%nMwXL_N2K%#dA&1!Ci_SC*_`HCHM&;(-VrtV&92`h8-V#K>v+0QK?oLNv-M|!nvY}7I4w0hm$2hf?_6}RfIkOsg&Dr zD%a#nD;Y^>zv|QV7s3}=9==v!DnYbr)Cqg!sNeKT`l^B!c)_f^AS)vMY<4Z@WC7C_y((t{aYM0qX&1c9QJEA?OsY&KLE)0p##E0jEx~i zt;*Gizh^=LiG_|qe;50Xsc29wll2~3`ZF@p^|@j~FzF&!(l`H6-&ZsFC7E;7Q<3EU z4#Af(?X=OZpt1qhN_OxG=?5$32Bm7=yf?Y}5aic)Vamv7!VT{wd3=m_ZHXi-Ae%cl zp@V#hTbp(od?fYVbHDFjeA1KG13s2xqHmGD_eYXxJUQI8JG`P=IVF&Xeu}+EP3#j6 z14bcS4+&%rod76$*2lJ#N!c_5a=j9iri!$_PD4YgdQBx3(9t0WH!*kZ8h%I_$@RCfdk_LXWZR5R zA?-m>o`Z)F=WCtWs>Puz2rIk#u34kxJgYQ21) z-nQ!dXr(m(ss&wHv|Mvh<^ce&82GjwB~+B}p!PPjOZ;|CRtWvd(u}D4WOrc-EuQU; zZrG?4)8OAvplit=JCGBiT*jxPCmc&v)XS7fL1?EP%WArRP*(SM7iiwMl69H7^D{^t zcf}t}c4-$Htz^6dPK7IQ-x9o(jNV7zA!~YNI(=u&Nl7q|kf1|ds%H{Y|5z>W&1L<8 z#Y+>GpYPRmf=w$TtMd+$p(eZWs7yH_4vGaGW0PUUJO*%(A#Jz-ik!;JfdQ{zo3A)- z=?J*-da;!lCaIyMc8F6L_pQzr%puMKc^S5J#gG^Qox$?HT6Q%q-f@Sd|&JgeX z{QO2jNpyX}`90Jf$R%O2FplZq*-X&^NJH6R-eOpt23=_2735I*>O8Vo@4f)RtLA|x zWQ2Jx60*4USkFpfweI^<2^oKMc|&+V3QRJvUO}Q-%p3GA50!%ANSJ}0Y(cHyt7l_ zP0#YXy>~76zw~bk;FMqkz;V#s{i}8JpoPjp5)qYPwu7>3_VE2WYOc;uiF%KXcpZ~q z+Ti^Vmsj~_@J$T^pe7t0pDreU;IN31S>ye+mg_IU3`AuHgQc z3-k)rg!ZBG%}F<`vaPHB8o&ZIkrJvdr*WV}yTSt3ch$#+59Ey2wmu%Yf0E@BeCF_$R|C zdV8d|LOFXrDIv~(9L1b4?g_0C?dj7s; zECXy01=Q>*RdEdju6NOVjAMExjP1DqNxUxALx;z6!6Ao^2%4Rq$mLs5@$}(fpiAV; zLdQK{66zripZnA(c&OadShK5;qk7bIb(i$0s~AAf((++J+a3h~l)&YuEx>6{JT z6#W=k1!n&g00E^W_OmQS>AMuNz5h76k%t0$4(DqcEZu2_`Y6}#qId=$sQ|t_33}wI z6_vm4h<`4V%#)X4y9l9L#`bE!O58fPyBbP^j$T%snSx>fdtEc-Iga;P0`JX$0^Wrv z-34n#@J6>$5aL49eF5Xy6#PrH07HPFDqN9f_MdU{1yxzOdQI|q)Ph1Jbea<{RRCrc zo(vzOpY=V%S4&(UWLO#o`31qDh{^cPpg*~|a}@j5aoS=nFi~&i0a6GUqmDJ>Ng_i{ zJA0LVzu!qP`GJ#-*QEhuP48Y~uO{yKQ+@qmFX5XR-tz9Q=Hw)yOAuLNQj&He!wml+ z{gd)mq6n4`TqWE&BYi8OU=$ZWS=~8s6a2;a7nuJ0bNNbE)j3P+RVYGoCcv0y1kke~ z*GC?NAgEu}l?^d+{=vLed0_K>(Fdm6pj)&3wKi`~f-UT3I#wv`^KBBa6 z(UWrp4!M|n>8i*B=+~!q*S?}nSwMWp;AF7qG9YcZ2Qp7s+^FCW z*Ax2!=|$qH>c2~m|NOW3`cfFbbG@IEBvj&qW6xKGHsRa57H3kkO0e3Qy=mFY`GpU0 zi59~5yPho3flSKlq8R0$G3xKlNqJR^#Ek>4>A2i9J+rt*)XI@lPqaeTPqwaJsO^9& z^b5CSa;5+7obMnAti&}ADHe-4z}8=y@9;D^|ESa<<%`Bp<6|SEDWH6^5^Ui%{dU{H z@)_1^n+%#G-j=@!gJ`c0?m#XCZn?nM4jAT^XR0-hB!0#}a6%1If6yf?X+VYIwKbb< zCbD>`zbA$9WK|lGUMmN7)O428O8r!qkculEr`I_c2xhyLf3)#Iztm3`8%d}>U3Z{H z$WvvFMlHna&UHk+08o4E?ke~*ld#o|KQWyA=n7c&?Hr-Vnw1J{z;S#-o=zD%6=1$_SPM7!T`rN)z}RQ_!*y(LpMFNlO0v3-LwWL(!Nxou|qKZ zXNh}SgdL|097bXFEqK{?Xfx5PO8YHiF&H?zJZtY;H@BSN-aB7=6$j3OFWmLReu=&M zYVq`TMgS1rr&W;#t>Sxv#nVm^4k=652bBcbYeRr5_MuMkvRaH!WdFq%pFeY&H`h+i zxSab`lj8r@3q8JJlFb%HVWacnfsRQquL~LRr2T^|Vw18$gFJz(>REyt9XAhd>b!uF z^F|GT`HJj^O?MC8C=A_M%Iywpkr$17hW}aVkXpSIlp)itscche^lk@B#(1S0 z%KlJ>-H*kZV@emQD<-z)f)b`+Oqdomb_ud#-pWAekJlb9`KVvuzFg*JA%n8K7U`!C z4?d7y>kM_2PWS-(AS~gKvTp-MGu}L*So^QM7t;F7uOmS4Bf9_y);<+vve6ccouP6V zr;lrLaflNS2t)b=g}oNhwmLZa>esJdz~eWbwOzFznKpj$yzEpXC0P!ICH$GE?CH5X z6!Ri|_we1)thv}ul1?5;oNMfh5?-IVSdtoAg>*96o9+embMU)ykB%~W@ z1Qmp#B&E?0AuS_azsql}@1L@k@ZP)kp0m$B`|K<0;9_Vg$LlwckiD4f#J^O%y1F`N z9ACkx1x67Zp*Q;7K|l;!?y~&qx?s@pDffwOCr6fC*B`TcNE1H#W8d-TpDK}_cFQBs zjTHu=QXeBK*Qj6E#G;F_n-_t$mBkhqdquX6ESS)Tgfjrfu`UC<{_sHB^J`zZ;+rH- zt$5|!Dc0UTBJ2Dl)O-i#$uE~NM!zd)_BJF0`A<(zRVpu<;=Es)uX6FKN{i#>g9|wc z9XUp28?6Ky<=&ZY@dF8y&|3vM3N)m|?%G^mx?V3}m1q{s;4IWP$U6{^1^qIhhqrh} zUWq!MI70d((~0bc-S~szf#LbcnMA)r$qv;pvpO{gq_@6LbcVFf2zzD=vw)-#j 
zG4t3Hp4eBf0&h|d&Z}Gl&6L*$8Ipaf6&%ECt$tbyTEm^O>xR>&1g~))cvnQ6s$WQZ zoZGR6(=z>9Xghqr;d`QxT2Oa%#H%whM@q|vyRixOB1J0Vf%8X0VeVY?rx>nqva}bE zB1gtT0#6JGm6=eAeVe{vYRCqdDa-Qo5G>_!#i0t3{1&y(2DD+gJ>U{bjl*vj587Qh zwy+y*25r*|lCfsJmVY%HzClVc$u4!n!fX%aYj#YguNKGY>Wsh@XWX>7y_qBbUO{h< zK5i~EZ^)Ng0-aO%i5A2S^B*&bhITi7g9h(tFPFfGD7NjF{u_uU@@Z|no0XO6b3<5A zQ}@qb9w~xJvW13z3OA)C);DFo&1W~Em-z{U;YS=dPwCP-H*=v=>xZRn)#wa;v#;6HN3u!$xJo+F=R&DsDB8^^X z=$j&3lS)?CSzzxI^z7M6OE)fWAuf#q(x4xH1HIH@B>rorV6vA%O3$ebJn#+H5j-H5gVgk(VSS1+G7XhnXU@6&%~uNT`+5i?_poi& zCwTmMHHS&R$KT@LbOMn^_gBou#EygV501`%qJ!8``i5nERNlRUr`PN#Vj7610=T|7 zGv-LWNp7Q8R(4GeRpshC}{a5*b+SY@QAofLF) zeXCMeNX$zR@^77!rE5wMU-&*#nNq3y4Gz^=>hU%-3gx%MzlGrXS1cC zOL3i`X}FafK@}xDG~>C`XZ6f0*#?r7js^L;jWr(;Z{N$UFrXAENE2&+r36G5IC~H} z)xl`IEyI+_{yONXI*a0s8u7~mufk97^REyGyi>AoOy6UChn*|mNO3057a{cL>{vgF z%$qL0vbnzC_0GqS1TwKJ8YJf;7$I7;Dtt;0mAf9Aav^Odvx<*o6pQRwuM|%c_`OIF zx1QLeN-ZcGlGtQIWg2(+tY2}JZrp#9sJ2>XSGabFJk>=rYONMHKe1c35#bNAof`d8 zFrcy3(m4NFWJsU+-`=&<0`}|d?6ZZEiEEb;T~fz<0Czwd;L^@8yfYKvHUBDHzCbrc zp1#9jZHPCV*>b-2$8W6yE&c|A)m!1rZ#|@4UeQb4jfkR>6W97>(xnupAdO>>;3#sS zGn2Hk(AEjpMLylT`7d?(OEmVRL*#;mF{1oB0%cnA$@P|ZGI-8h=^{h+;q>cc@5(m* z`io2IH0uViis5DipDTe~x&fi1oedF*QZO|FR&d-YzyQf@tV~6I(2`Y!A1^v3yTasy zi(nQ^p!LY93R>|4MBsFHW(EiqR#6Es5Oai}2o- zr!-qPQYB^7cqQjHvkJ!CQ_uBmw|11j|4!5ia$v(57fg&MBt43F@gkwk zPZB0o({r_*k$23jEq#yevGeW8>-HrB@#(`!x~rbev+D~q?kpXpqvY??geH~2Ch$Pu zf0CuDIxbV=PGbw4OC)t%>8jE|tfFb2J%5+*OcKpubmbub!GE|mrFXk>V(x4H5EE$t zYeUpRe3rCOj`v4eTW<6f)D{*zY@>kUBR}ga*&2!Z&ns2cG+rj`?pHnxB^omOzOPWgoP_$(8x{=Cgnr+Qm=H^e5X zT;im6IA=er06X_A^_h!*%VGV4kaXpHgUQHUK38S)U48Df^3yn_b7q%egR)ApR>qm# z{lh%?#i+ zUkVt7L1)=&9hrsoqEoOAPkA@co55Sc674HdUK#c**mzwDxXs= zT)_7Nz^*%eaLo)c}!ruDK>e+mJJ}WGicn5}PCtCO?iNdunGLaO=CCjmO zG#m7C?DRoWI@{)$W?;CY23eI2UWlyPdlL-Z)s!LbD4@Mp<)9Fl2Ct}rvL_8Y@QBt4PPV8d-bS@BdkLyBaL=AqsG|j(5ZR0O`MM}DhB)BYo%W( zR#~M9xXZi<6H9Kb_oeB0(-DI`xWcus#jh3JqlUbNm`?f0AMVtR(>2=Bg*@SiEVOD* zlsMi5`Og-t;E~WGS0UI8>>Y-8tsNX2)5F$|9i=Uk>4*7HZItJ&yC7d>T0DAkQI|8J zJ3b5a7eY|W7XIb%WW*T2o%pr%8my z#qiz8S_x1p#0f1O>4II5f>nxCB9VfEpQJ96>5L2*+-V^>SF_lLnWlS4ch+M>9{Cl* z{2!7Hhkz>KLZbWZffiqDG&YMj_cboqux+u(r|wueswx3nLAc|c)E8imu@9w3mlcmR z*tGGNy&i2b)OP96+;LigR#Rs750?qae z4Iq+8r!P<+P(w~*WjH$=FT)@ts@!U(javmlKj;dVF-cE4Ce#5HSKu53AqfxZmB(o~ z;#@1Vd%CPuo9?9ZpPu}6lw`sZDuYu;0oLs+u7#-M)>4Q_vE~LL`!|17CTxKPm6>Uo zs>kU(x8NLoZO3)oB#Iqs8av3GdE%$<=C&~ms6#4vv0o(*d}&LXHb21m{G;I4ohSUKr$zOzFtEEJQE`w|Vd971C6EpR( zgW30xsVY&lhC&%TrCCeS5qBreefqLss{OZum3_SX5m|dBERk@Q%H}#EtoB>1^TVT` z_Ols;3)OXWo;6pvs+#)r0FDzU?Aq|L?T@Y& zAA2Xxx~Pj7YIHx zgc-l928z89oVZJW=-)_PP{??r|^{Rm(mcV%6* z_&WsuROfq9%HJ}mHo4W3&!B2+tL$}(xq%Jx^&4g;(?@o9wk?R;zjeMLwsu<6`S&A=(#ltK zuRu{jn0ojeeLElN{oSQu&c2D@r7aEyZ4r37Mh(C#$|f^0&y2X2$r;^biP{J}%Z;9y zQw|X8VK2qa4zEe+QwIw&-oi_Mfv70)-!tbsj4@+Rp@ie|}@~H^HL# zM%?LZhiKWK0>-W8;T-qDH&lbO#JyDI)gb3m)X~a#_T0#goA4vUt@%FO;I9RN6$OLd z93WRsOBLt*-V}5lJsEYuoWNHh zf-_Kk2_iusjbFqtPRo+M@Ta_w6LS&cnfs;YkS@CaFEr-;m1tWTbh$#xdSWq zzA#FNVF!stk(x5{nt6BKzn+7rAapH+0(tcZLC0{KZz=Ik3=44$?Pq|gf5Rq26zA8I zBvp2Y4$Q(iupiE45Ltv5R_@RZRAy3=?AO!>E*ur7wE!?H5q!G2x=yo8rLpbVQy2LH zX7<$w+JfiS{&d*9(~#&0V9qm6y0ecXH}WZED8=Yn$rf@!_m;cVfPI+AhXL);?f$cF zxLQ4<*C+e^NtD_u-Icg*d@HRjgdnY0Xq5nOk_3X~gN9d6-^U;Ms9=I2UXqYA@3uv! 
zXRYYx)OI)Mgl^^HaPJP)+?TG~OIyB=<}(%ZJelnLwXLe7p?O4r<^!{auZ^m?$K@Un zi7FEdRC|)iPoIR_dFrL(c89pHKWPYR=>C0O8Tq+uQvu_I2SCyILxH-w*;+ya#>rXl zzea#R0i=H7y4FJJ&Ah_z@VN#2y+7_fh;|ZX-;Myo?Y${2#rn#_Ao!FGRkMchhlB~j zxBB|}BBtX-3~x@}i$)w?t`!Y1tQd)Q5`-F#gc1V3)2n5IB>KM_^dc}$s+9=d@JV@4KdryNRlwXKkG zJ{{0BNYqxguB&H98i!O|5F?uW#%wW+PIWmMw)!1c1JV&YF8zbzIrDdW5PN)H&OT`X z!#~Zi)w}b_qWE-BOL5j}KUDZ(P?+9>J18bL_?*!_i%XxHT-0^#l-b|4Qo-c!R3v}8 z)wdK&M)S;N=K1-Zw9SWVrHY3yd1N()yX?Dxf__dQ22)cW?8O8kURb(FvS0dnHNa=(mDeXR}t(N8a zrS;%`9y**naqKuB258BM{3$5dLbly!IFZIAOy+ZIXNg8 ztkuUT76=!a(V0e}&tN^=x#_&mP)rSK)0c#|vZl=S4vYYY459yf%ny_1JycbHHZxwD ze`y2?MBv?j=96zr6qbF6^V4x%F)Nc*;1tE<@rpj5-o0~HOQi0(fodb^pn@!L!6pDM zr*Q=?2S5bJYk+>K1BW5YM5bFWBZ?P1IjHLZ(#6AOZsl+M{_B6oTF7djiQbR|M@zs+~c?Y_IZ{lYi9qko9{1Z+FF)sgkF;rf!72kW3JyA8QU2LPWrVp zXLOK3AQ@jb9vS{OB;gK#3()hjRtMz>!A1C=a-p9B`y=ODkf@-FM=R!s&l&0xWrn+i z&QsgygH|LS)Mjo*PtyQJy>%C92Q2Px=%?5O>mm_p;-k)zR!3M|dKZp;k<-?S5oSO4 z5~E1EqaO0bYoIEbmz;}{5aY0XyIPB(*vKm_N$(igq7q&tM8#pZzYFW67^!A%866+3 z%D4m{DEjHlGY$kJ;v=Mevv{45A6geE}` z;)~xsAr%@1P^UorOA?25JtZ~k2grZ2h<3mUI%;UObOR9aK0o1t^y2OD)3NklB$c$v zPe3NAKTCaSr=S65@gISSFN7m1`4;laapWaGv3nG}4z&RGr$EAUbkPCDF@Vdf(PEe* z3vTyF*2a$Ixe=ik(P6Ji5%UJ-hZ7(^s6t)P2H3l6Q}?KTGeNr3!1pW)G%!2d= z*J^@c3Qz$b0PgRU2Tg-L>I%SfPG9|gkl)!$ZCG*2qI(;)%yTDA$ipaNp8{RG+_V1n zQ!h7|sv$HVN!$3J7GQ^TCSVLzlfhhm?bjl(y)-AV@lyTH27xMn9WZeqHabU+B*rg2 zL+NPoDF6-z@~nm9sdg%UDUCKo)WFc#2?D39Kp-d=?c`Ocn_&yUElX^{Q*m{eDZzqp z3~t>B6?O6Fh`tV!ZJ87`=Z_ioIdeg^UGK1g>RAS+32ms*bq2k4APv;F$sjV4WS4-e zv4&c%9L0I!#8^%OcK(`*eDhRU*!avhV*)V-=<4nGl*IUl%mq9NZyc6+!)bXdiibj} zO_~U6U!lF%<90kWxS@Bz1V}Kt{X*~J0XQqKF>QuX4TV>)N~XRrwKDj%31t$zeZs6> zKZcax0A#^+6-*tZsLRjSG*f-AwRim)#6-v^sZ+xM#i^|_0+TR5Q_!<9g35_Svl*X7 z2}sN%@a?c*3Y**L3_u)*vJS}We8QK9B1etLo(dqc3VO0BT$oq|cynn!Bvjm);Mgs` zH$M9whJ$XvaJ|oDIAgwugA95`ddVvPE*G5-lY$3kVVg@TLV0u7L8@CH{cgbiS`E69 zug%inV3JpSLwN1y5)Zs72{Yi+JX=yWp7RtGXVlPeL+N}|AF6?-TF^j5g=XW_A6T{C z7NnW&oTugFoc{1NX((O(3UvA-^aW~j5}^RxNtqgi_0AzPDDQ!!Z?nSh8ffp8}3sNX&)4z6%! z>A;aU6=0{A#9C3`sNDMR&WMD0_`461QFCATgoI`(7Kry=y?XOzi$7dC20M5&7omT` zwmggr1|BCT*T@LBkzt!@#BCL?Qrk?5h<4b~6_`~pMr zXaISUDw)OMJ1Ebte1}fKCO+V$sCsJ}pcUr~#N7i{YAyFXfwKD$LvC4RGfmn>^gDjK_Ov#bdSoeuXV~T>A zNBx&Ie={gFxxrVY(K_~)>s;SE*N|!PNGk|%(E8rIl%v6+37p2Lve!;?Ek|kxR4{K0 zdjH>Bbqe9dc3M?>L8D9$F{;Re%ML<~=Y~LH4~5I#$e-c7c5TnPj<2LS(30(ABs1W! 
[GIT binary patch literal data omitted]
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig index ba49e3c..fad8958 100644 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -1,5 +1,12 @@ root = true -[*] +[*.go] indent_style = tab indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 0000000..32f1001 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml index 981d1bb..a9c3016 100644 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -2,29 +2,35 @@ sudo: false language: go go: - - 1.8.x - - 1.9.x - - tip + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" matrix: + include: + - go: "stable" + env: GOLINT=true allow_failures: - go: tip fast_finish: true -before_script: - - go get -u github.com/golang/lint/golint + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi script: - - go test -v --race ./... + - go test --race ./... after_script: - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - test -z "$(golint ./... | tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - go vet ./... os: - linux - osx + - windows notifications: email: false diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index f21e540..e180c8f 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,5 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 3993207..b2629e5 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -10,16 +10,16 @@ go get -u golang.org/x/sys/... Cross platform: Windows, Linux, BSD and macOS. -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | \* Android and iOS are untested. @@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. 
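Beyond the usage example that the vendored README adds below, a consumer will often want to react to the queue-overflow error this package exports. The following is a minimal sketch, not part of the vendored diff; the watched path `/tmp/foo` is only a stand-in.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("/tmp/foo"); err != nil { // stand-in path
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			log.Println("event:", event)
		case err := <-watcher.Errors:
			if err == fsnotify.ErrEventOverflow {
				// The kernel event queue overflowed and events were dropped,
				// so the watched tree may need to be rescanned.
				log.Println("overflow:", err)
				continue
			}
			log.Println("error:", err)
		}
	}
}
```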
+## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + ## Contributing Please refer to [CONTRIBUTING][] before opening an issue or pull request. @@ -65,6 +112,10 @@ There are OS-specific limits as to how many watches can be created: * Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. * BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + [#62]: https://github.com/howeyc/fsnotify/issues/62 [#18]: https://github.com/fsnotify/fsnotify/issues/18 [#11]: https://github.com/fsnotify/fsnotify/issues/11 diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 190bf0d..89cab04 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -63,4 +63,6 @@ func (e Event) String() string { } // Common errors that can be reported by a watcher -var ErrEventOverflow = errors.New("fsnotify queue overflow") +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 0000000..ff11e13 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 0000000..f60af98 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go index cc7db4b..b33f2b4 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -40,12 +40,12 @@ func newFdPoller(fd int) (*fdPoller, error) { poller.fd = fd // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) if poller.epfd == -1 { return nil, errno } // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) if errno != nil { return nil, errno } diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 7d8de14..2306c46 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -8,4 +8,4 @@ package fsnotify import "golang.org/x/sys/unix" -const openMode = unix.O_NONBLOCK | unix.O_RDONLY +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 9139e17..870c4d6 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -9,4 +9,4 @@ package fsnotify import "golang.org/x/sys/unix" // note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml b/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml new file mode 100644 index 0000000..764b541 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/.travis.yml @@ -0,0 +1,43 @@ +language: go + +go: + - 1.2.x + - 1.6.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.14.x + - tip + +os: + - linux + +arch: + - amd64 + - ppc64le + +dist: xenial + +env: + - GOARCH=amd64 + +jobs: + include: + - os: windows + go: 1.14.x + - os: osx + go: 1.14.x + - os: linux + go: 1.14.x + arch: arm64 + - os: linux + go: 1.14.x + env: + - GOARCH=386 + +script: + - go test -v -cover ./... || go test -v ./... +matrix: + allowfailures: + go: 1.2.x diff --git a/vendor/gopkg.in/asn1-ber.v1/LICENSE b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE similarity index 100% rename from vendor/gopkg.in/asn1-ber.v1/LICENSE rename to vendor/github.com/go-asn1-ber/asn1-ber/LICENSE diff --git a/vendor/gopkg.in/asn1-ber.v1/README.md b/vendor/github.com/go-asn1-ber/asn1-ber/README.md similarity index 100% rename from vendor/gopkg.in/asn1-ber.v1/README.md rename to vendor/github.com/go-asn1-ber/asn1-ber/README.md diff --git a/vendor/gopkg.in/asn1-ber.v1/ber.go b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go similarity index 69% rename from vendor/gopkg.in/asn1-ber.v1/ber.go rename to vendor/github.com/go-asn1-ber/asn1-ber/ber.go index 6153f46..4fd7a66 100644 --- a/vendor/gopkg.in/asn1-ber.v1/ber.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go @@ -8,6 +8,8 @@ import ( "math" "os" "reflect" + "time" + "unicode/utf8" ) // MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. 
Set to 0 for @@ -143,42 +145,46 @@ var TypeMap = map[Type]string{ TypeConstructed: "Constructed", } -var Debug bool = false +var Debug = false func PrintBytes(out io.Writer, buf []byte, indent string) { - data_lines := make([]string, (len(buf)/30)+1) - num_lines := make([]string, (len(buf)/30)+1) + dataLines := make([]string, (len(buf)/30)+1) + numLines := make([]string, (len(buf)/30)+1) for i, b := range buf { - data_lines[i/30] += fmt.Sprintf("%02x ", b) - num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) + dataLines[i/30] += fmt.Sprintf("%02x ", b) + numLines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) } - for i := 0; i < len(data_lines); i++ { - out.Write([]byte(indent + data_lines[i] + "\n")) - out.Write([]byte(indent + num_lines[i] + "\n\n")) + for i := 0; i < len(dataLines); i++ { + _, _ = out.Write([]byte(indent + dataLines[i] + "\n")) + _, _ = out.Write([]byte(indent + numLines[i] + "\n\n")) } } +func WritePacket(out io.Writer, p *Packet) { + printPacket(out, p, 0, false) +} + func PrintPacket(p *Packet) { printPacket(os.Stdout, p, 0, false) } func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { - indent_str := "" + indentStr := "" - for len(indent_str) != indent { - indent_str += " " + for len(indentStr) != indent { + indentStr += " " } - class_str := ClassMap[p.ClassType] + classStr := ClassMap[p.ClassType] - tagtype_str := TypeMap[p.TagType] + tagTypeStr := TypeMap[p.TagType] - tag_str := fmt.Sprintf("0x%02X", p.Tag) + tagStr := fmt.Sprintf("0x%02X", p.Tag) if p.ClassType == ClassUniversal { - tag_str = tagMap[p.Tag] + tagStr = tagMap[p.Tag] } value := fmt.Sprint(p.Value) @@ -188,10 +194,10 @@ func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { description = p.Description + ": " } - fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value) + _, _ = fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indentStr, description, classStr, tagTypeStr, tagStr, p.Data.Len(), value) if printBytes { - PrintBytes(out, p.Bytes(), indent_str) + PrintBytes(out, p.Bytes(), indentStr) } for _, child := range p.Children { @@ -199,7 +205,7 @@ func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { } } -// ReadPacket reads a single Packet from the reader +// ReadPacket reads a single Packet from the reader. func ReadPacket(reader io.Reader) (*Packet, error) { p, _, err := readPacket(reader) if err != nil { @@ -235,7 +241,7 @@ func encodeInteger(i int64) []byte { var j int for ; n > 0; n-- { - out[j] = (byte(i >> uint((n-1)*8))) + out[j] = byte(i >> uint((n-1)*8)) j++ } @@ -267,7 +273,7 @@ func DecodePacket(data []byte) *Packet { } // DecodePacketErr decodes the given bytes into a single Packet -// If a decode error is encountered, nil is returned +// If a decode error is encountered, nil is returned. func DecodePacketErr(data []byte) (*Packet, error) { p, _, err := readPacket(bytes.NewBuffer(data)) if err != nil { @@ -276,7 +282,7 @@ func DecodePacketErr(data []byte) (*Packet, error) { return p, nil } -// readPacket reads a single Packet from the reader, returning the number of bytes read +// readPacket reads a single Packet from the reader, returning the number of bytes read. 
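As orientation for the encode/decode helpers touched above, here is a hedged sketch (not part of the vendored diff) that round-trips a packet through this package's exported API: the packet constructors, the error-returning DecodePacketErr, and the newly introduced WritePacket.

```go
package main

import (
	"log"
	"os"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Build a SEQUENCE holding one OCTET STRING.
	seq := ber.NewSequence("request")
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "payload"))

	// Decode the wire bytes back, preferring the error-returning variant.
	pkt, err := ber.DecodePacketErr(seq.Bytes())
	if err != nil {
		log.Fatal(err)
	}

	// WritePacket is the io.Writer counterpart of PrintPacket added in this revision.
	ber.WritePacket(os.Stdout, pkt)
}
```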
func readPacket(reader io.Reader) (*Packet, int, error) { identifier, length, read, err := readHeader(reader) if err != nil { @@ -338,7 +344,7 @@ func readPacket(reader io.Reader) (*Packet, int, error) { if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes { return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes) } - content := make([]byte, length, length) + content := make([]byte, length) if length > 0 { _, err := io.ReadFull(reader, content) if err != nil { @@ -373,22 +379,42 @@ func readPacket(reader io.Reader) (*Packet, int, error) { case TagObjectDescriptor: case TagExternal: case TagRealFloat: + p.Value, err = ParseReal(content) case TagEnumerated: p.Value, _ = ParseInt64(content) case TagEmbeddedPDV: case TagUTF8String: - p.Value = DecodeString(content) + val := DecodeString(content) + if !utf8.Valid([]byte(val)) { + err = errors.New("invalid UTF-8 string") + } else { + p.Value = val + } case TagRelativeOID: case TagSequence: case TagSet: case TagNumericString: case TagPrintableString: - p.Value = DecodeString(content) + val := DecodeString(content) + if err = isPrintableString(val); err == nil { + p.Value = val + } case TagT61String: case TagVideotexString: case TagIA5String: + val := DecodeString(content) + for i, c := range val { + if c >= 0x7F { + err = fmt.Errorf("invalid character for IA5String at pos %d: %c", i, c) + break + } + } + if err == nil { + p.Value = val + } case TagUTCTime: case TagGeneralizedTime: + p.Value, err = ParseGeneralizedTime(content) case TagGraphicString: case TagVisibleString: case TagGeneralString: @@ -400,7 +426,24 @@ func readPacket(reader io.Reader) (*Packet, int, error) { p.Data.Write(content) } - return p, read, nil + return p, read, err +} + +func isPrintableString(val string) error { + for i, c := range val { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c >= '0' && c <= '9': + default: + switch c { + case '\'', '(', ')', '+', ',', '-', '.', '=', '/', ':', '?', ' ': + default: + return fmt.Errorf("invalid character in position %d", i) + } + } + } + return nil } func (p *Packet) Bytes() []byte { @@ -418,61 +461,99 @@ func (p *Packet) AppendChild(child *Packet) { p.Children = append(p.Children, child) } -func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { +func Encode(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { p := new(Packet) - p.ClassType = ClassType - p.TagType = TagType - p.Tag = Tag + p.ClassType = classType + p.TagType = tagType + p.Tag = tag p.Data = new(bytes.Buffer) p.Children = make([]*Packet, 0, 2) - p.Value = Value - p.Description = Description + p.Value = value + p.Description = description - if Value != nil { - v := reflect.ValueOf(Value) + if value != nil { + v := reflect.ValueOf(value) - if ClassType == ClassUniversal { - switch Tag { + if classType == ClassUniversal { + switch tag { case TagOctetString: sv, ok := v.Interface().(string) if ok { p.Data.Write([]byte(sv)) } + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + } + } else if classType == ClassContext { + switch tag { + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } } } } - return p } -func NewSequence(Description string) *Packet { - 
return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description) +func NewSequence(description string) *Packet { + return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, description) } -func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet { +func NewBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { intValue := int64(0) - if Value { + if value { intValue = 1 } - p := Encode(ClassType, TagType, Tag, nil, Description) + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet. +func NewLDAPBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { + intValue := int64(0) + + if value { + intValue = 255 + } + + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value + p.Value = value p.Data.Write(encodeInteger(intValue)) return p } -func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { - p := Encode(ClassType, TagType, Tag, nil, Description) +func NewInteger(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value - switch v := Value.(type) { + p.Value = value + switch v := value.(type) { case int: p.Data.Write(encodeInteger(int64(v))) case uint: @@ -502,11 +583,38 @@ func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Descr return p } -func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet { - p := Encode(ClassType, TagType, Tag, nil, Description) +func NewString(classType Class, tagType Type, tag Tag, value, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) - p.Value = Value - p.Data.Write([]byte(Value)) + p.Value = value + p.Data.Write([]byte(value)) return p } + +func NewGeneralizedTime(classType Class, tagType Type, tag Tag, value time.Time, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + var s string + if value.Nanosecond() != 0 { + s = value.Format(`20060102150405.000000000Z`) + } else { + s = value.Format(`20060102150405Z`) + } + p.Value = s + p.Data.Write([]byte(s)) + return p +} + +func NewReal(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + switch v := value.(type) { + case float64: + p.Data.Write(encodeFloat(v)) + case float32: + p.Data.Write(encodeFloat(float64(v))) + default: + panic(fmt.Sprintf("Invalid type %T, expected float{64|32}", v)) + } + return p +} diff --git a/vendor/gopkg.in/asn1-ber.v1/content_int.go b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go similarity index 87% rename from vendor/gopkg.in/asn1-ber.v1/content_int.go rename to vendor/github.com/go-asn1-ber/asn1-ber/content_int.go index 1858b74..20b500f 100644 --- a/vendor/gopkg.in/asn1-ber.v1/content_int.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go @@ -6,7 +6,7 @@ func encodeUnsignedInteger(i uint64) []byte { var j int for ; n > 0; n-- { - out[j] = (byte(i >> uint((n-1)*8))) + out[j] = byte(i >> uint((n-1)*8)) j++ } diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go new file mode 100644 index 0000000..51215f0 --- /dev/null +++ 
b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go @@ -0,0 +1,105 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "time" +) + +// ErrInvalidTimeFormat is returned when the generalizedTime string was not correct. +var ErrInvalidTimeFormat = errors.New("invalid time format") + +var zeroTime = time.Time{} + +// ParseGeneralizedTime parses a string value and if it conforms to +// GeneralizedTime[^0] format, will return a time.Time for that value. +// +// [^0]: https://www.itu.int/rec/T-REC-X.690-201508-I/en Section 11.7 +func ParseGeneralizedTime(v []byte) (time.Time, error) { + var format string + var fract time.Duration + + str := []byte(DecodeString(v)) + tzIndex := bytes.IndexAny(str, "Z+-") + if tzIndex < 0 { + return zeroTime, ErrInvalidTimeFormat + } + + dot := bytes.IndexAny(str, ".,") + switch dot { + case -1: + switch tzIndex { + case 10: + format = `2006010215Z` + case 12: + format = `200601021504Z` + case 14: + format = `20060102150405Z` + default: + return zeroTime, ErrInvalidTimeFormat + } + + case 10, 12: + if tzIndex < dot { + return zeroTime, ErrInvalidTimeFormat + } + // a "," is also allowed, but would not be parsed by time.Parse(): + str[dot] = '.' + + // If is omitted, then represents a fraction of an + // hour; otherwise, if and are omitted, then + // represents a fraction of a minute; otherwise, + // represents a fraction of a second. + + // parse as float from dot to timezone + f, err := strconv.ParseFloat(string(str[dot:tzIndex]), 64) + if err != nil { + return zeroTime, fmt.Errorf("failed to parse float: %s", err) + } + // ...and strip that part + str = append(str[:dot], str[tzIndex:]...) + tzIndex = dot + + if dot == 10 { + fract = time.Duration(int64(f * float64(time.Hour))) + format = `2006010215Z` + } else { + fract = time.Duration(int64(f * float64(time.Minute))) + format = `200601021504Z` + } + + case 14: + if tzIndex < dot { + return zeroTime, ErrInvalidTimeFormat + } + str[dot] = '.' + // no need for fractional seconds, time.Parse() handles that + format = `20060102150405Z` + + default: + return zeroTime, ErrInvalidTimeFormat + } + + l := len(str) + switch l - tzIndex { + case 1: + if str[l-1] != 'Z' { + return zeroTime, ErrInvalidTimeFormat + } + case 3: + format += `0700` + str = append(str, []byte("00")...) 
+ case 5: + format += `0700` + default: + return zeroTime, ErrInvalidTimeFormat + } + + t, err := time.Parse(format, string(str)) + if err != nil { + return zeroTime, fmt.Errorf("%s: %s", ErrInvalidTimeFormat, err) + } + return t.Add(fract), nil +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/go.mod b/vendor/github.com/go-asn1-ber/asn1-ber/go.mod new file mode 100644 index 0000000..ee0b4be --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/go.mod @@ -0,0 +1,3 @@ +module github.com/go-asn1-ber/asn1-ber + +go 1.13 diff --git a/vendor/gopkg.in/asn1-ber.v1/header.go b/vendor/github.com/go-asn1-ber/asn1-ber/header.go similarity index 75% rename from vendor/gopkg.in/asn1-ber.v1/header.go rename to vendor/github.com/go-asn1-ber/asn1-ber/header.go index 7161562..7dfa6b9 100644 --- a/vendor/gopkg.in/asn1-ber.v1/header.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/header.go @@ -7,19 +7,22 @@ import ( ) func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) { - if i, c, err := readIdentifier(reader); err != nil { + var ( + c, l int + i Identifier + ) + + if i, c, err = readIdentifier(reader); err != nil { return Identifier{}, 0, read, err - } else { - identifier = i - read += c } + identifier = i + read += c - if l, c, err := readLength(reader); err != nil { + if l, c, err = readLength(reader); err != nil { return Identifier{}, 0, read, err - } else { - length = l - read += c } + length = l + read += c // Validate length type with identifier (x.600, 8.1.3.2.a) if length == LengthIndefinite && identifier.TagType == TypePrimitive { diff --git a/vendor/gopkg.in/asn1-ber.v1/identifier.go b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go similarity index 100% rename from vendor/gopkg.in/asn1-ber.v1/identifier.go rename to vendor/github.com/go-asn1-ber/asn1-ber/identifier.go diff --git a/vendor/gopkg.in/asn1-ber.v1/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go similarity index 85% rename from vendor/gopkg.in/asn1-ber.v1/length.go rename to vendor/github.com/go-asn1-ber/asn1-ber/length.go index 750e8f4..9cc195d 100644 --- a/vendor/gopkg.in/asn1-ber.v1/length.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go @@ -71,11 +71,11 @@ func readLength(reader io.Reader) (length int, read int, err error) { } func encodeLength(length int) []byte { - length_bytes := encodeUnsignedInteger(uint64(length)) - if length > 127 || len(length_bytes) > 1 { - longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))} - longFormBytes = append(longFormBytes, length_bytes...) - length_bytes = longFormBytes + lengthBytes := encodeUnsignedInteger(uint64(length)) + if length > 127 || len(lengthBytes) > 1 { + longFormBytes := []byte{LengthLongFormBitmask | byte(len(lengthBytes))} + longFormBytes = append(longFormBytes, lengthBytes...) 
+ lengthBytes = longFormBytes } - return length_bytes + return lengthBytes } diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/real.go b/vendor/github.com/go-asn1-ber/asn1-ber/real.go new file mode 100644 index 0000000..610a003 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/real.go @@ -0,0 +1,157 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" +) + +func encodeFloat(v float64) []byte { + switch { + case math.IsInf(v, 1): + return []byte{0x40} + case math.IsInf(v, -1): + return []byte{0x41} + case math.IsNaN(v): + return []byte{0x42} + case v == 0.0: + if math.Signbit(v) { + return []byte{0x43} + } + return []byte{} + default: + // we take the easy part ;-) + value := []byte(strconv.FormatFloat(v, 'G', -1, 64)) + var ret []byte + if bytes.Contains(value, []byte{'E'}) { + ret = []byte{0x03} + } else { + ret = []byte{0x02} + } + ret = append(ret, value...) + return ret + } +} + +func ParseReal(v []byte) (val float64, err error) { + if len(v) == 0 { + return 0.0, nil + } + switch { + case v[0]&0x80 == 0x80: + val, err = parseBinaryFloat(v) + case v[0]&0xC0 == 0x40: + val, err = parseSpecialFloat(v) + case v[0]&0xC0 == 0x0: + val, err = parseDecimalFloat(v) + default: + return 0.0, fmt.Errorf("invalid info block") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && !math.Signbit(val) { + return 0.0, errors.New("REAL value +0 must be encoded with zero-length value block") + } + return val, nil +} + +func parseBinaryFloat(v []byte) (float64, error) { + var info byte + var buf []byte + + info, v = v[0], v[1:] + + var base int + switch info & 0x30 { + case 0x00: + base = 2 + case 0x10: + base = 8 + case 0x20: + base = 16 + case 0x30: + return 0.0, errors.New("bits 6 and 5 of information octet for REAL are equal to 11") + } + + scale := uint((info & 0x0c) >> 2) + + var expLen int + switch info & 0x03 { + case 0x00: + expLen = 1 + case 0x01: + expLen = 2 + case 0x02: + expLen = 3 + case 0x03: + expLen = int(v[0]) + if expLen > 8 { + return 0.0, errors.New("too big value of exponent") + } + v = v[1:] + } + buf, v = v[:expLen], v[expLen:] + exponent, err := ParseInt64(buf) + if err != nil { + return 0.0, err + } + + if len(v) > 8 { + return 0.0, errors.New("too big value of mantissa") + } + + mant, err := ParseInt64(v) + if err != nil { + return 0.0, err + } + mantissa := mant << scale + + if info&0x40 == 0x40 { + mantissa = -mantissa + } + + return float64(mantissa) * math.Pow(float64(base), float64(exponent)), nil +} + +func parseDecimalFloat(v []byte) (val float64, err error) { + switch v[0] & 0x3F { + case 0x01: // NR form 1 + var iVal int64 + iVal, err = strconv.ParseInt(strings.TrimLeft(string(v[1:]), " "), 10, 64) + val = float64(iVal) + case 0x02, 0x03: // NR form 2, 3 + val, err = strconv.ParseFloat(strings.Replace(strings.TrimLeft(string(v[1:]), " "), ",", ".", -1), 64) + default: + err = errors.New("incorrect NR form") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && math.Signbit(val) { + return 0.0, errors.New("REAL value -0 must be encoded as a special value") + } + return val, nil +} + +func parseSpecialFloat(v []byte) (float64, error) { + if len(v) != 1 { + return 0.0, errors.New(`encoding of "special value" must not contain exponent and mantissa`) + } + switch v[0] { + case 0x40: + return math.Inf(1), nil + case 0x41: + return math.Inf(-1), nil + case 0x42: + return math.NaN(), nil + case 0x43: + return math.Copysign(0, -1), nil + } + return 0.0, errors.New(`encoding of "special value" not from ASN.1 
standard`) +} diff --git a/vendor/gopkg.in/asn1-ber.v1/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go similarity index 93% rename from vendor/gopkg.in/asn1-ber.v1/util.go rename to vendor/github.com/go-asn1-ber/asn1-ber/util.go index 3e56b66..14dc87d 100644 --- a/vendor/gopkg.in/asn1-ber.v1/util.go +++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go @@ -3,7 +3,7 @@ package ber import "io" func readByte(reader io.Reader) (byte, error) { - bytes := make([]byte, 1, 1) + bytes := make([]byte, 1) _, err := io.ReadFull(reader, bytes) if err != nil { if err == io.EOF { diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 0000000..e810e6f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. 
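The deterministic-serialization behavior described in the comment above is opted into per Buffer. A minimal sketch, assuming `m` is any generated message type and the `protoutil` package name is purely hypothetical:

```go
package protoutil

import "github.com/golang/protobuf/proto"

// MarshalDeterministic serializes m with deterministic output (map entries
// sorted by key), as described for Buffer.SetDeterministic above.
func MarshalDeterministic(m proto.Message) ([]byte, error) {
	var b proto.Buffer
	b.SetDeterministic(true)
	if err := b.Marshal(m); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
```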
+func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. 
+func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). 
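Taken together, the Encode*/Decode* methods above give a low-level cursor over the wire format. A hedged sketch of a round trip, using only calls defined in this file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Append a varint and a length-prefixed string to an empty buffer.
	w := proto.NewBuffer(nil)
	if err := w.EncodeVarint(42); err != nil {
		log.Fatal(err)
	}
	if err := w.EncodeStringBytes("hello"); err != nil {
		log.Fatal(err)
	}

	// Read them back; the buffer tracks its own read offset.
	r := proto.NewBuffer(w.Bytes())
	n, err := r.DecodeVarint()
	if err != nil {
		log.Fatal(err)
	}
	s, err := r.DecodeStringBytes()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n, s) // 42 hello
}
```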
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249..0000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. 
- // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. 
- elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08..0000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
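The doc comments above describe the varint wire format shared by int32, int64, uint32, uint64, bool and enum fields: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. A small sketch of that encoding using google.golang.org/protobuf/encoding/protowire, which exposes the same format as the Buffer helpers being removed here:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// 300 = 0b1_0010_1100 is split into 7-bit groups (0101100, then 10),
	// emitted low group first with the continuation bit set: 0xac 0x02.
	b := protowire.AppendVarint(nil, 300)
	fmt.Printf("% x\n", b) // ac 02

	v, n := protowire.ConsumeVarint(b)
	fmt.Println(v, n) // 300 2
}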
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
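The zigzag helpers above implement the mapping used by sint32/sint64 fields: signed values of small magnitude map to small unsigned values (0→0, -1→1, 1→2, -2→3, …) so they stay short once varint-encoded. A quick illustration via protowire, which provides the same transform:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	for _, x := range []int64{0, -1, 1, -2, 2, -64} {
		fmt.Println(x, "->", protowire.EncodeZigZag(x))
	}
	// Decoding reverses the mapping: (127 >> 1) ^ -(127 & 1) = 63 ^ -1 = -64.
	fmt.Println(protowire.DecodeZigZag(127)) // -64
}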
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 0000000..d399bf0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. 
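The deleted decode.go above documents the contract that proto.Unmarshal resets the destination before decoding while UnmarshalMerge preserves existing data and merges into it; the rewritten package keeps that contract. A small sketch (Struct is used only as a convenient message with a map field):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	src, _ := structpb.NewStruct(map[string]interface{}{"b": 2})
	enc, _ := proto.Marshal(src)

	dst, _ := structpb.NewStruct(map[string]interface{}{"a": 1})
	_ = proto.Unmarshal(enc, dst) // resets dst first
	fmt.Println(len(dst.Fields))  // 1: only "b"

	dst, _ = structpb.NewStruct(map[string]interface{}{"a": 1})
	_ = proto.UnmarshalMerge(enc, dst) // keeps existing data
	fmt.Println(len(dst.Fields))       // 2: "a" and "b"
}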
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go index 35b882c..e8db57e 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -1,63 +1,113 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -import "errors" +import ( + "encoding/json" + "errors" + "fmt" + "strconv" -// Deprecated: do not use. + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } -// Deprecated: do not use. +// Deprecated: Do not use. func GetStats() Stats { return Stats{} } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSet(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSet([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. 
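The new defaults.go above drives SetDefaults through protoreflect instead of generated struct layout, but it is intended to be behaviour-preserving: unset proto2 scalar fields pick up their declared defaults, recursively. A sketch using descriptorpb (a proto2 file with explicit defaults), chosen only so the example compiles without any messages from this repository:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.FileOptions{}
	fmt.Println(opts.OptimizeFor != nil) // false: the field is unset

	// optimize_for is declared as `optional ... [default = SPEED]`, so
	// SetDefaults materializes that default on the unset field.
	proto.SetDefaults(opts)
	fmt.Println(opts.OptimizeFor != nil, opts.GetOptimizeFor()) // true SPEED
}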
func MarshalMessageSetJSON(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSetJSON([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617..2187e87 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
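The deprecated enum helpers added above keep jsonpb's historical tolerance for enums written either as a name or as a number. For illustration only (the map literal stands in for a generated `<Enum>_value` map, which is an assumption of this sketch):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	values := map[string]int32{"STATE_UNKNOWN": 0, "STATE_ACTIVE": 1}

	// New style: the enum is a JSON string holding the value name.
	a, _ := proto.UnmarshalJSONEnum(values, []byte(`"STATE_ACTIVE"`), "State")
	// Old style: the enum is a bare JSON number.
	b, _ := proto.UnmarshalJSONEnum(values, []byte(`1`), "State")

	fmt.Println(a, b) // 1 1
}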
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. 
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. 
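As the rewritten discard.go above shows, unknown fields now live behind protoreflect's GetUnknown/SetUnknown rather than an XXX_unrecognized struct member, and DiscardUnknown simply clears them recursively. A small sketch that manufactures an unknown field by decoding one message type into another:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	src, _ := structpb.NewStruct(map[string]interface{}{"extra": "x"})
	enc, _ := proto.Marshal(src)

	// Empty declares no fields, so everything in enc is kept as unknown bytes.
	var dst emptypb.Empty
	_ = proto.Unmarshal(enc, &dst)
	fmt.Println(len(dst.ProtoReflect().GetUnknown()) > 0) // true

	proto.DiscardUnknown(&dst)
	fmt.Println(len(dst.ProtoReflect().GetUnknown())) // 0
}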
+ if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2..0000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. 
-func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. 
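EncodeRawBytes and EncodeStringBytes above write the length-delimited form used for bytes, string and embedded-message fields: a varint byte count followed by the raw data. Framed with a tag, the same layout can be reproduced with protowire, for example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.BytesType) // field 1, wire type 2
	b = protowire.AppendBytes(b, []byte("hi"))         // varint length, then data
	fmt.Printf("% x\n", b)                             // 0a 02 68 69
}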
-func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41..0000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. 
- -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
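The equality rules spelled out in the deleted equal.go (same concrete type, field-by-field comparison, proto3 nil and empty bytes treated alike, unknown fields compared by encoding) broadly still describe what proto.Equal does after this migration. A minimal usage sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("x")
	b := wrapperspb.String("x")
	fmt.Println(proto.Equal(a, b)) // true: same type, equal fields

	b.Value = "y"
	fmt.Println(proto.Equal(a, b)) // false
}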
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add..42fc120 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,607 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. 
+ ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. 
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + return has } -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. 
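A notable change in the rewritten HasExtension/GetExtension above is the fallback scan over the message's unknown fields when the extension is not a populated known field: unknown fields are just raw tag/value bytes kept on the message, walkable with protowire.ConsumeField. A small sketch of that scan, using Empty and a made-up field number 1000 purely for illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m := &emptypb.Empty{}

	// Attach a raw varint field with number 1000, much as unmarshaling an
	// unrecognized extension would.
	raw := protowire.AppendTag(nil, 1000, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 42)
	m.ProtoReflect().SetUnknown(raw)

	// Walk the unknown bytes field by field, matching on the field number.
	for b := m.ProtoReflect().GetUnknown(); len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		fmt.Println("unknown field number:", num) // 1000
		b = b[n:]
	}
}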
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } } - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: return nil, ErrMissingExtension } - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } + return v, nil +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. 
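A hedged sketch of calling the GetExtension wrapper above, covering the missing-extension and type-incomplete cases its doc comment describes; examplepb, MyMessage, and the string-typed descriptor E_MyOption are hypothetical generated identifiers:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/hypothetical/examplepb" // placeholder generated package
)

func main() {
	msg := &examplepb.MyMessage{}

	v, err := proto.GetExtension(msg, examplepb.E_MyOption)
	switch {
	case err == proto.ErrMissingExtension:
		fmt.Println("extension not set and no default declared")
	case err != nil:
		fmt.Println("message not extendable or decode failed:", err)
	default:
		// For a string-typed extension the v1 API returns a *string.
		fmt.Println(*v.(*string))
	}

	// With a type-incomplete descriptor (ExtensionType == nil), GetExtension
	// returns whatever raw wire bytes are still stored for that field number.
	if raw, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: 100}); err == nil {
		fmt.Printf("% x\n", raw.([]byte))
	}
}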
- return sf.value, nil - } +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByName(field) } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) if err != nil { - return + if err == ErrMissingExtension { + continue + } + return vs, err } + vs[i] = v } - return + return vs, nil } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
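A short sketch of GetExtensions above, which fetches several extensions at once and leaves a nil entry for each one that is missing; the descriptors E_OptA and E_OptB and the package examplepb are hypothetical generated identifiers:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/hypothetical/examplepb" // placeholder generated package
)

func main() {
	msg := &examplepb.MyMessage{}

	vals, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{examplepb.E_OptA, examplepb.E_OptB})
	if err != nil {
		log.Fatal(err) // nil/non-extendable message or a decode failure
	}
	for i, v := range vals {
		if v == nil {
			fmt.Println("extension", i, "is not set (ErrMissingExtension is swallowed)")
			continue
		}
		fmt.Println("extension", i, "=", v)
	}
}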
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable } - registeredExtensions := RegisteredExtensions(pb) - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } - - extensions = append(extensions, desc) } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) return nil } -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] } -} -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
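A minimal sketch of the SetExtension wrapper above: the supplied value must have exactly the Go type recorded in ExtensionDesc.ExtensionType (assumed here to be *string), and a mismatched or nil value is rejected with an error; the examplepb identifiers are hypothetical:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/hypothetical/examplepb" // placeholder generated package
)

func main() {
	msg := &examplepb.MyMessage{}

	// proto.String allocates a *string, matching the assumed ExtensionType.
	if err := proto.SetExtension(msg, examplepb.E_MyOption, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	// Passing a value of the wrong type returns an error instead of
	// silently dropping the field.
	if err := proto.SetExtension(msg, examplepb.E_MyOption, 42); err != nil {
		log.Println("expected type error:", err)
	}
}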
+ ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } - m[desc.Field] = desc + return xts, nil } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. 
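A brief sketch of iterating the descriptors returned by ExtensionDescs above. Extensions that exist only as unknown fields come back as type-incomplete descriptors with just the field number set; checking for an empty Name is one way to spot them, assuming registered descriptors always carry a name. examplepb.MyMessage is a hypothetical generated type:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/hypothetical/examplepb" // placeholder generated package
)

func main() {
	msg := &examplepb.MyMessage{}

	descs, err := proto.ExtensionDescs(msg)
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range descs {
		if d.Name == "" {
			// Type-incomplete descriptor: the extension was only seen as an
			// unknown field, so only its field number is known.
			fmt.Println("unregistered extension field", d.Field)
			continue
		}
		fmt.Println("extension", d.Name, "field", d.Field)
	}
}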
- switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } + return true + default: + return false } - return v } -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - return v + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328b..0000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
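The clearUnknown helper above walks the unknown-field buffer with protowire.ConsumeField and keeps only the fields the remover does not match. A self-contained sketch of the same filtering idea on a raw wire buffer; the dropField helper is illustrative only and not part of this package:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// dropField returns a copy of the wire-format buffer b with every occurrence
// of field number num removed, mirroring the loop used by clearUnknown.
func dropField(b []byte, num protowire.Number) []byte {
	var out []byte
	for len(b) > 0 {
		n, _, length := protowire.ConsumeField(b)
		if length < 0 {
			return out // malformed input; stop rather than loop forever
		}
		if n != num {
			out = append(out, b[:length]...)
		}
		b = b[length:]
	}
	return out
}

func main() {
	// Hand-build two fields: field 1 = varint 150, field 2 = bytes "hi".
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("hi"))

	fmt.Printf("% x\n", dropField(b, 1)) // only field 2 remains: 12 02 68 69
}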
- -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a756..0000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa919..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe0..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index a4b8c0c..dcdc220 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,162 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. 
OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. 
+ // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -170,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -189,356 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } + + prop.Prop = append(prop.Prop, p) } - // Re-order prop.order. - sort.Sort(prop) + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 0000000..5aee89c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 0000000..1e7ff64 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. 
- t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. 
- sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. 
-func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - 
return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case 
reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + 
SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := 
*ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func 
sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. 
-func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
- /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee72..0000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 0000000..47eb3e4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go 
new file mode 100644 index 0000000..a31134e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3a..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 0000000..d7c28da --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 0000000..398e348 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. 
+func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 0000000..63dc057 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto + +package descriptor + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/descriptor.proto. + +type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type + +const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT +const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 +const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 +const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 +const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 +const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 +const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL +const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING +const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP +const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE +const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES +const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 +const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM +const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 +const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 +const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 +const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 + +var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name +var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value + +type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label + +const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED +const FieldDescriptorProto_LABEL_REPEATED = 
descriptorpb.FieldDescriptorProto_LABEL_REPEATED + +var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name +var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value + +type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode + +const FileOptions_SPEED = descriptorpb.FileOptions_SPEED +const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE +const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME + +var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name +var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value + +type FieldOptions_CType = descriptorpb.FieldOptions_CType + +const FieldOptions_STRING = descriptorpb.FieldOptions_STRING +const FieldOptions_CORD = descriptorpb.FieldOptions_CORD +const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE + +var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name +var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value + +type FieldOptions_JSType = descriptorpb.FieldOptions_JSType + +const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL +const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING +const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER + +var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name +var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value + +type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel + +const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN +const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS +const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT + +var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name +var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value + +type FileDescriptorSet = descriptorpb.FileDescriptorSet +type FileDescriptorProto = descriptorpb.FileDescriptorProto +type DescriptorProto = descriptorpb.DescriptorProto +type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions +type FieldDescriptorProto = descriptorpb.FieldDescriptorProto +type OneofDescriptorProto = descriptorpb.OneofDescriptorProto +type EnumDescriptorProto = descriptorpb.EnumDescriptorProto +type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto +type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto +type MethodDescriptorProto = descriptorpb.MethodDescriptorProto + +const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming +const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming + +type FileOptions = descriptorpb.FileOptions + +const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles +const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 +const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor +const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices +const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices +const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices +const Default_FileOptions_PhpGenericServices = 
descriptorpb.Default_FileOptions_PhpGenericServices +const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated +const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas + +type MessageOptions = descriptorpb.MessageOptions + +const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat +const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor +const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated + +type FieldOptions = descriptorpb.FieldOptions + +const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype +const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype +const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated +const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak + +type OneofOptions = descriptorpb.OneofOptions +type EnumOptions = descriptorpb.EnumOptions + +const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated + +type EnumValueOptions = descriptorpb.EnumValueOptions + +const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated + +type ServiceOptions = descriptorpb.ServiceOptions + +const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated + +type MethodOptions = descriptorpb.MethodOptions + +const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated +const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel + +type UninterpretedOption = descriptorpb.UninterpretedOption +type SourceCodeInfo = descriptorpb.SourceCodeInfo +type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo +type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange +type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location +type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation + +var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x32, +} + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } +func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { + if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 70276e8..e729dcf 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -1,141 +1,165 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - import ( "fmt" - "reflect" "strings" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" ) -const googleApis = "type.googleapis.com/" +const urlPrefix = "type.googleapis.com/" -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { if any == nil { return "", fmt.Errorf("message is nil") } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } - return any.TypeUrl[slash+1:], nil + return name, nil } -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) +// MarshalAny marshals the given message m into an anypb.Any message. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) if err != nil { return nil, err } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil } -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. 
It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) if err != nil { return nil, err } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err } - return reflect.New(t.Elem()).Interface().(proto.Message), nil + return proto.MessageV1(mt.New().Interface()), nil } -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. // -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { var err error - d.Message, err = Empty(any) + dm.Message, err = Empty(any) if err != nil { return err } } - return UnmarshalAny(any, d.Message) + m = dm.Message } - aname, err := AnyMessageName(any) + anyName, err := AnyMessageName(any) if err != nil { return err } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) } - return proto.Unmarshal(any.Value, pb) + return proto.Unmarshal(any.Value, m) } -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { +// Is reports whether the Any message contains a message of the specified type. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { return false } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 78ee523..0ef27d3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,200 +1,62 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto +// source: github.com/golang/protobuf/ptypes/any/any.proto package any import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/any.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Any = anypb.Any -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... 
-// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 
0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 4932942..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. 
(Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index c0d595d..fb9edd5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -1,35 +1,6 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. -/* -Package ptypes contains code for interacting with well-known types. -*/ +// Package ptypes provides functionality for interacting with well-known types. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 26d1ca2..6110ae8 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -1,102 +1,72 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - import ( "errors" "fmt" "time" - durpb "github.com/golang/protobuf/ptypes/duration" + durationpb "github.com/golang/protobuf/ptypes/duration" ) +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. 
+func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { return 0, err } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } } return d, nil } -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, + return &durationpb.Duration{ + Seconds: int64(secs), Nanos: int32(nanos), } } + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 0d681ee..d0079ee 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,161 +1,63 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto +// source: github.com/golang/protobuf/ptypes/duration/duration.proto package duration import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/duration.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
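Illustrative example (not part of the vendored file or of this patch): the duration.go hunk above converts between time.Duration and durationpb.Duration, with validateDuration rejecting out-of-range or mixed-sign values. A minimal sketch of both directions and the error path:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durationpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> google.protobuf.Duration
	d := ptypes.DurationProto(90 * time.Second)
	fmt.Println(d.Seconds, d.Nanos) // 90 0

	// google.protobuf.Duration -> time.Duration
	back, err := ptypes.Duration(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back) // 1m30s

	// Mixed-sign seconds/nanos fail validateDuration and return an error.
	if _, err := ptypes.Duration(&durationpb.Duration{Seconds: 1, Nanos: -1}); err != nil {
		fmt.Println("invalid:", err)
	}
}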
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Duration = durationpb.Duration -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 
0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce4..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. 
For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 0000000..16686a6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. + +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 
8da0df0..026d0d4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -1,46 +1,18 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements operations on google.protobuf.Timestamp. - import ( "errors" "fmt" "time" - tspb "github.com/golang/protobuf/ptypes/timestamp" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" ) +// Range of google.protobuf.Duration as specified in timestamp.proto. const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). @@ -50,44 +22,18 @@ const ( maxValidSeconds = 253402300800 ) -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// Timestamp converts a timestamppb.Timestamp to a time.Time. // It returns an error if the argument is invalid. 
// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time @@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { +func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { panic("ptypes: time.Now() out of Timestamp range") @@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ +func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { + ts := &timestamppb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } @@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) { return ts, nil } -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { +// TimestampString returns the RFC 3339 string for valid Timestamps. +// For invalid Timestamps, it returns an error message in parentheses. +func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { return fmt.Sprintf("(%v)", err) } return t.Format(time.RFC3339Nano) } + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true. +func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 31cd846..a76f807 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,179 +1,64 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
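Illustrative example (not part of the vendored file or of this patch): the timestamp.go hunk above converts between time.Time and timestamppb.Timestamp and formats valid Timestamps as RFC 3339. A minimal sketch, using an arbitrary date:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp (errors outside [0001-01-01, 10000-01-01)).
	ts, err := ptypes.TimestampProto(time.Date(2021, time.August, 2, 11, 20, 26, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	// google.protobuf.Timestamp -> time.Time
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.UTC()) // 2021-08-02 11:20:26 +0000 UTC

	// RFC 3339 formatting; invalid Timestamps yield "(<error>)" instead.
	fmt.Println(ptypes.TimestampString(ts))  // 2021-08-02T11:20:26Z
	fmt.Println(ptypes.TimestampString(nil)) // (timestamp: nil Timestamp)
}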
-// source: google/protobuf/timestamp.proto +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto package timestamp import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/timestamp.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Timestamp = timestamppb.Timestamp -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. 
The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, 
deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil } diff --git 
a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. 
-// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index 9d92c11..f765a46 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID). 
Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go index 7f9e0c6..14bd340 100644 --- a/vendor/github.com/google/uuid/marshal.go +++ b/vendor/github.com/google/uuid/marshal.go @@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) { // UnmarshalText implements encoding.TextUnmarshaler. func (uuid *UUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) - if err == nil { - *uuid = id + if err != nil { + return err } - return err + *uuid = id + return nil } // MarshalBinary implements encoding.BinaryMarshaler. diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go index 199a1ac..4631096 100644 --- a/vendor/github.com/google/uuid/version1.go +++ b/vendor/github.com/google/uuid/version1.go @@ -17,12 +17,6 @@ import ( // // In most cases, New should be used. func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - var uuid UUID now, seq, err := GetTime() if err != nil { @@ -38,7 +32,13 @@ func NewUUID() (UUID, error) { binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() return uuid, nil } diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 84af91c..c110465 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -27,8 +27,13 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 0827d05..19aa2e7 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -8,7 +8,7 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the ### Documentation -* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) * [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 6f17cd2..ca46d2f 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -244,8 +244,8 @@ type Conn struct { subprotocol string // Write fields - mu chan bool // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. 
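Stepping back to the google/uuid hunk above: the new NewRandomFromReader lets callers supply their own entropy source instead of the package-level reader, which is mostly useful for reproducible IDs in tests. A minimal sketch; the seeded math/rand reader is only an illustration, and production code would keep using uuid.New, which reads from the default source.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// Deterministic source, e.g. for tests; *rand.Rand satisfies io.Reader.
	r := rand.New(rand.NewSource(1))

	id, err := uuid.NewRandomFromReader(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // same value on every run with the same seed
}
```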
+ mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. writePool BufferPool writeBufSize int writeDeadline time.Time @@ -302,8 +302,8 @@ func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBuf = make([]byte, writeBufferSize) } - mu := make(chan bool, 1) - mu <- true + mu := make(chan struct{}, 1) + mu <- struct{}{} c := &Conn{ isServer: isServer, br: br, @@ -377,7 +377,7 @@ func (c *Conn) read(n int) ([]byte, error) { func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { <-c.mu - defer func() { c.mu <- true }() + defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() err := c.writeErr @@ -429,7 +429,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er maskBytes(key, 0, buf[6:]) } - d := time.Hour * 1000 + d := 1000 * time.Hour if !deadline.IsZero() { d = deadline.Sub(time.Now()) if d < 0 { @@ -444,7 +444,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er case <-timer.C: return errWriteTimeout } - defer func() { c.mu <- true }() + defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() err := c.writeErr diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go index c6f4df8..8db0cef 100644 --- a/vendor/github.com/gorilla/websocket/doc.go +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -187,9 +187,9 @@ // than the largest message do not provide any benefit. // // Depending on the distribution of message sizes, setting the buffer size to -// to a value less than the maximum expected message size can greatly reduce -// memory use with a small impact on performance. Here's an example: If 99% of -// the messages are smaller than 256 bytes and the maximum message size is 512 +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 // bytes, then a buffer size of 256 bytes will result in 1.01 more system calls // than a buffer size of 512 bytes. The memory savings is 50%. // diff --git a/vendor/github.com/gorilla/websocket/go.sum b/vendor/github.com/gorilla/websocket/go.sum index cf4fbba..e69de29 100644 --- a/vendor/github.com/gorilla/websocket/go.sum +++ b/vendor/github.com/gorilla/websocket/go.sum @@ -1,2 +0,0 @@ -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go index 74ec565..c854225 100644 --- a/vendor/github.com/gorilla/websocket/prepared.go +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -73,8 +73,8 @@ func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { // Prepare a frame using a 'fake' connection. // TODO: Refactor code in conn.go to allow more direct construction of // the frame. - mu := make(chan bool, 1) - mu <- true + mu := make(chan struct{}, 1) + mu <- struct{}{} var nc prepareConn c := &Conn{ conn: &nc, diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 0000000..444df08 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. 
This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 0000000..44e368e --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,178 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +// +// Deprecated: Use fmt.Errorf() +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. 
+func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 0000000..c9b8402 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 9b6845e..5d56f4b 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -132,7 +132,7 @@ Alternatively, you may configure the system-wide logger: ```go // log the standard logger from 'import "log"' -log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) log.SetPrefix("") log.SetFlags(0) diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 0000000..44aa9bf --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. 
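Taken together, the errwrap functions above give a predictable traversal order: GetAll reports the outermost match first, and Get returns the deepest one. A small usage sketch built only from the Wrapf, Contains, and Get functions defined in this file (the `{{err}}` placeholder is the package's own substitution convention):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	base := errors.New("permission denied")

	// Wrap twice; Wrapf substitutes the inner message for {{err}}.
	err := errwrap.Wrapf("opening config: {{err}}", base)
	err = errwrap.Wrapf("loading plugin: {{err}}", err)

	fmt.Println(errwrap.Contains(err, "permission denied")) // true

	// Get returns the deepest wrapped error whose message matches exactly.
	if inner := errwrap.Get(err, "permission denied"); inner != nil {
		fmt.Println("found:", inner)
	}
}
```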
+func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + fallthrough + case ForceColor: + return + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(fi.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + } + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 0000000..23486b6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + return + case ForceColor: + fi := l.checkWriterIsFile() + l.writer.w = colorable.NewColorable(fi) + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + return + } + l.writer.w = colorable.NewColorable(fi) + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go new file mode 100644 index 0000000..cfd4307 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/exclude.go @@ -0,0 +1,71 @@ +package hclog + +import ( + "regexp" + "strings" +) + +// ExcludeByMessage provides a simple way to build a list of log messages that +// can be queried and matched. This is meant to be used with the Exclude +// option on Options to suppress log messages. This does not hold any mutexs +// within itself, so normal usage would be to Add entries at setup and none after +// Exclude is going to be called. Exclude is called with a mutex held within +// the Logger, so that doesn't need to use a mutex. Example usage: +// +// f := new(ExcludeByMessage) +// f.Add("Noisy log message text") +// appLogger.Exclude = f.Exclude +type ExcludeByMessage struct { + messages map[string]struct{} +} + +// Add a message to be filtered. Do not call this after Exclude is to be called +// due to concurrency issues. +func (f *ExcludeByMessage) Add(msg string) { + if f.messages == nil { + f.messages = make(map[string]struct{}) + } + + f.messages[msg] = struct{}{} +} + +// Return true if the given message should be included +func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool { + _, ok := f.messages[msg] + return ok +} + +// ExcludeByPrefix is a simple type to match a message string that has a common prefix. +type ExcludeByPrefix string + +// Matches an message that starts with the prefix. +func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool { + return strings.HasPrefix(msg, string(p)) +} + +// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches +// the log entry is excluded. 
+type ExcludeByRegexp struct { + Regexp *regexp.Regexp +} + +// Exclude the log message if the message string matches the regexp +func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool { + return e.Regexp.MatchString(msg) +} + +// ExcludeFuncs is a slice of functions that will called to see if a log entry +// should be filtered or not. It stops calling functions once at least one returns +// true. +type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool + +// Calls each function until one of them returns true +func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool { + for _, f := range ff { + if f(level, msg, args...) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 3efc54c..22ebc57 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -20,6 +20,13 @@ var ( // Default returns a globally held logger. This can be a good starting // place, and then you can use .With() and .Name() to create sub-loggers // to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// cause should be used if SetDefault() is called it random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. func Default() Logger { protect.Do(func() { // If SetDefault was used before Default() was called, we need to @@ -41,6 +48,13 @@ func L() Logger { // to the one given. This allows packages to use the default logger // and have higher level packages change it to match the execution // environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to setup +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. 
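ExcludeByMessage, ExcludeByPrefix, ExcludeByRegexp, and ExcludeFuncs above all share the same Exclude signature, so any of them can be plugged into the logger's exclude hook. A minimal sketch, assuming hclog.LoggerOptions exposes the Name, Color, and Exclude fields that the intlogger changes later in this patch read from:

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Drop any log line whose message starts with "healthcheck".
	noisy := hclog.ExcludeByPrefix("healthcheck")

	logger := hclog.New(&hclog.LoggerOptions{
		Name:    "example",
		Color:   hclog.AutoColor, // auto-detects a terminal, per setColorization above
		Exclude: noisy.Exclude,
	})

	logger.Info("healthcheck passed")                 // filtered out
	logger.Info("serving request", "path", "/status") // emitted
}
```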
func SetDefault(log Logger) Logger { old := def def = log diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod index 0d079a6..b6698c0 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.mod +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -2,6 +2,11 @@ module github.com/hashicorp/go-hclog require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 + github.com/mattn/go-colorable v0.1.4 + github.com/mattn/go-isatty v0.0.10 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 ) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum index e03ee77..3a656df 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.sum +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -1,6 +1,18 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 0000000..d8e2e76 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,235 @@ +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + mu *sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + mu: new(sync.Mutex), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.Logger.Log(level, msg, args...) 
+ if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at TRACE level to log and sinks +func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) + } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) Named(name string) Logger { + return i.NamedIntercept(name) +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + return i.ResetNamedIntercept(name) +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + sub.Logger = i.Logger.Named(name) + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. 
+func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + sub.Logger = i.Logger.ResetNamed(name) + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to interceptLoggers sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + return i.StandardLogger(opts) +} + +func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(i.StandardWriter(opts), "", 0) +} + +func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return i.StandardWriter(opts) +} + +func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: i, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutput(opts) + } else { + return nil + } +} + +func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutputWithFlush(opts, flushable) + } else { + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 219656c..f961ed9 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -4,9 +4,11 @@ import ( "bytes" "encoding" "encoding/json" + "errors" "fmt" "io" "log" + "os" "reflect" "runtime" "sort" @@ -15,6 +17,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/fatih/color" ) // TimeFormat to use for logging. This is a version of RFC3339 that contains @@ -32,6 +36,14 @@ var ( Warn: "[WARN] ", Error: "[ERROR]", } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } ) // Make sure that intLogger is a Logger @@ -45,17 +57,32 @@ type intLogger struct { name string timeFormat string - // This is a pointer so that it's shared by any derived loggers, since + // This is an interface so that it's shared by any derived loggers, since // those derived loggers share the bufio.Writer as well. - mutex *sync.Mutex + mutex Locker writer *writer level *int32 implied []interface{} + + exclude func(level Level, msg string, args ...interface{}) bool + + // create subloggers with their own level setting + independentLevels bool } // New returns a configured logger. 
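The interceptLogger above forwards every record to the wrapped logger and then to each registered sink. A sketch of typical registration; it assumes SinkAdapter is the single-method interface matching the Accept calls above, and uses NewSinkAdapter from the intlogger.go hunk that follows to build a JSON-formatted sink:

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name: "app",
	})

	// Mirror everything the logger emits to a second, JSON-formatted sink.
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Output:     os.Stdout,
		JSONFormat: true,
	})
	logger.RegisterSink(sink)
	defer logger.DeregisterSink(sink)

	logger.Info("request handled", "status", 200)
}
```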
func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + return newLogger(opts) +} + +func newLogger(opts *LoggerOptions) *intLogger { if opts == nil { opts = &LoggerOptions{} } @@ -76,16 +103,22 @@ func New(opts *LoggerOptions) Logger { } l := &intLogger{ - json: opts.JSONFormat, - caller: opts.IncludeLocation, - name: opts.Name, - timeFormat: TimeFormat, - mutex: mutex, - writer: newWriter(output), - level: new(int32), + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output, opts.Color), + level: new(int32), + exclude: opts.Exclude, + independentLevels: opts.IndependentLevels, } - if opts.TimeFormat != "" { + l.setColorization(opts) + + if opts.DisableTime { + l.timeFormat = "" + } else if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } @@ -96,7 +129,7 @@ func New(opts *LoggerOptions) Logger { // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. -func (l *intLogger) Log(level Level, msg string, args ...interface{}) { +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { if level < Level(atomic.LoadInt32(l.level)) { return } @@ -106,10 +139,14 @@ func (l *intLogger) Log(level Level, msg string, args ...interface{}) { l.mutex.Lock() defer l.mutex.Unlock() + if l.exclude != nil && l.exclude(level, msg, args...) { + return + } + if l.json { - l.logJSON(t, level, msg, args...) + l.logJSON(t, name, level, msg, args...) } else { - l.log(t, level, msg, args...) + l.logPlain(t, name, level, msg, args...) 
} l.writer.Flush(level) @@ -145,9 +182,11 @@ func trimCallerPath(path string) string { } // Non-JSON logging format function -func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - l.writer.WriteString(t.Format(l.timeFormat)) - l.writer.WriteByte(' ') +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + if len(l.timeFormat) > 0 { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + } s, ok := _levelToBracket[level] if ok { @@ -156,8 +195,17 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ l.writer.WriteString("[?????]") } + offset := 3 if l.caller { - if _, file, line, ok := runtime.Caller(3); ok { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + if strings.HasSuffix(file, "intlogger.go") || strings.HasSuffix(file, "interceptlogger.go") { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { l.writer.WriteByte(' ') l.writer.WriteString(trimCallerPath(file)) l.writer.WriteByte(':') @@ -168,8 +216,8 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ l.writer.WriteByte(' ') - if l.name != "" { - l.writer.WriteString(l.name) + if name != "" { + l.writer.WriteString(name) l.writer.WriteString(": ") } @@ -186,7 +234,8 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ args = args[:len(args)-1] stacktrace = cs } else { - args = append(args, "") + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) } } @@ -222,6 +271,12 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ val = strconv.FormatUint(uint64(st), 10) case uint8: val = strconv.FormatUint(uint64(st), 10) + case Hex: + val = "0x" + strconv.FormatUint(uint64(st), 16) + case Octal: + val = "0" + strconv.FormatUint(uint64(st), 8) + case Binary: + val = "0b" + strconv.FormatUint(uint64(st), 2) case CapturedStacktrace: stacktrace = st continue FOR @@ -238,7 +293,12 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ } l.writer.WriteByte(' ') - l.writer.WriteString(args[i].(string)) + switch st := args[i].(type) { + case string: + l.writer.WriteString(st) + default: + l.writer.WriteString(fmt.Sprintf("%s", st)) + } l.writer.WriteByte('=') if !raw && strings.ContainsAny(val, " \t\n\r") { @@ -255,6 +315,7 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ if stacktrace != "" { l.writer.WriteString(string(stacktrace)) + l.writer.WriteString("\n") } } @@ -298,8 +359,8 @@ func (l *intLogger) renderSlice(v reflect.Value) string { } // JSON logging function -func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { - vals := l.jsonMapEntry(t, level, msg) +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) args = append(l.implied, args...) 
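The new Hex, Octal, and Binary cases in the plain formatter above select the base used when rendering integer values. A sketch, assuming these are the package's integer-backed wrapper types (only their formatting branches appear in this hunk):

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "fmt-demo"})

	// Rendered by the plain formatter as mask=0xff perms=0644 flags=0b1011.
	logger.Info("mode bits",
		"mask", hclog.Hex(255),
		"perms", hclog.Octal(0644),
		"flags", hclog.Binary(11),
	)
}
```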
if args != nil && len(args) > 0 { @@ -309,16 +370,12 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf args = args[:len(args)-1] vals["stacktrace"] = cs } else { - args = append(args, "") + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) } } for i := 0; i < len(args); i = i + 2 { - if _, ok := args[i].(string); !ok { - // As this is the logging function not much we can do here - // without injecting into logs... - continue - } val := args[i+1] switch sv := val.(type) { case error: @@ -334,14 +391,22 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf val = fmt.Sprintf(sv[0].(string), sv[1:]...) } - vals[args[i].(string)] = val + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val } } err := json.NewEncoder(l.writer).Encode(vals) if err != nil { if _, ok := err.(*json.UnsupportedTypeError); ok { - plainVal := l.jsonMapEntry(t, level, msg) + plainVal := l.jsonMapEntry(t, name, level, msg) plainVal["@warn"] = errJsonUnsupportedTypeMsg json.NewEncoder(l.writer).Encode(plainVal) @@ -349,7 +414,7 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf } } -func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { vals := map[string]interface{}{ "@message": msg, "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), @@ -373,8 +438,8 @@ func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string vals["@level"] = levelStr - if l.name != "" { - vals["@module"] = l.name + if name != "" { + vals["@module"] = name } if l.caller { @@ -385,29 +450,34 @@ func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string return vals } +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + // Emit the message and args at DEBUG level func (l *intLogger) Debug(msg string, args ...interface{}) { - l.Log(Debug, msg, args...) + l.log(l.Name(), Debug, msg, args...) } // Emit the message and args at TRACE level func (l *intLogger) Trace(msg string, args ...interface{}) { - l.Log(Trace, msg, args...) + l.log(l.Name(), Trace, msg, args...) } // Emit the message and args at INFO level func (l *intLogger) Info(msg string, args ...interface{}) { - l.Log(Info, msg, args...) + l.log(l.Name(), Info, msg, args...) } // Emit the message and args at WARN level func (l *intLogger) Warn(msg string, args ...interface{}) { - l.Log(Warn, msg, args...) + l.log(l.Name(), Warn, msg, args...) } // Emit the message and args at ERROR level func (l *intLogger) Error(msg string, args ...interface{}) { - l.Log(Error, msg, args...) + l.log(l.Name(), Error, msg, args...) } // Indicate that the logger would emit TRACE level logs @@ -435,15 +505,20 @@ func (l *intLogger) IsError() bool { return Level(atomic.LoadInt32(l.level)) <= Error } +const MissingKey = "EXTRA_VALUE_AT_END" + // Return a sub-Logger for which every emitted log message will contain // the given key/value pairs. This is used to create a context specific // Logger. 
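As context for the `With`/`Named` hunks that follow, the new behaviour wired up above (DisableTime, Exclude, IndependentLevels, and the Hex/Octal/Binary value wrappers) can be exercised from calling code roughly as below. This is an illustrative sketch, not part of the vendored patch; the `Name`, `Level`, and `Output` fields are assumed from the pre-existing go-hclog options.

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:        "app",
		Level:       hclog.Debug, // assumed pre-existing option field
		Output:      os.Stderr,   // assumed pre-existing option field
		DisableTime: true,        // drop the timestamp entirely (new in this patch)
		// Exclude suppresses matching records before they are written (new in this patch).
		Exclude: func(level hclog.Level, msg string, args ...interface{}) bool {
			return msg == "too noisy"
		},
		// IndependentLevels keeps SetLevel on subloggers from affecting this logger.
		IndependentLevels: true,
	})

	// The Hex/Octal/Binary wrappers control how integer values are rendered.
	logger.Info("header value", "flags", hclog.Hex(255), "perms", hclog.Octal(0755))

	// Log with an explicit level, via the Log method added to the interface.
	logger.Log(hclog.Warn, "explicit level", "key", "value")
}
```

Note that a trailing key without a value no longer panics in the `With` implementation below; the dangling value is recorded under the `MissingKey` placeholder instead.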
func (l *intLogger) With(args ...interface{}) Logger { + var extra interface{} + if len(args)%2 != 0 { - panic("With() call requires paired arguments") + extra = args[len(args)-1] + args = args[:len(args)-1] } - sl := *l + sl := l.copy() result := make(map[string]interface{}, len(l.implied)+len(args)) keys := make([]string, 0, len(l.implied)+len(args)) @@ -473,13 +548,17 @@ func (l *intLogger) With(args ...interface{}) Logger { sl.implied = append(sl.implied, result[k]) } - return &sl + if extra != nil { + sl.implied = append(sl.implied, MissingKey, extra) + } + + return sl } // Create a new sub-Logger that a name decending from the current name. // This is used to create a subsystem specific Logger. func (l *intLogger) Named(name string) Logger { - sl := *l + sl := l.copy() if sl.name != "" { sl.name = sl.name + "." + name @@ -487,18 +566,53 @@ func (l *intLogger) Named(name string) Logger { sl.name = name } - return &sl + return sl } // Create a new sub-Logger with an explicit name. This ignores the current // name. This is used to create a standalone logger that doesn't fall // within the normal hierarchy. func (l *intLogger) ResetNamed(name string) Logger { - sl := *l + sl := l.copy() sl.name = name - return &sl + return sl +} + +func (l *intLogger) ResetOutput(opts *LoggerOptions) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.resetOutput(opts) +} + +func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + if flushable == nil { + return errors.New("flushable is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + if err := flushable.Flush(); err != nil { + return err + } + + return l.resetOutput(opts) +} + +func (l *intLogger) resetOutput(opts *LoggerOptions) error { + l.writer = newWriter(opts.Output, opts.Color) + l.setColorization(opts) + return nil } // Update the logging level on-the-fly. This will affect all subloggers as @@ -525,3 +639,41 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { forceLevel: opts.ForceLevel, } } + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. +func (l *intLogger) checkWriterIsFile() *os.File { + fi, ok := l.writer.w.(*os.File) + if !ok { + panic("Cannot enable coloring of non-file Writers") + } + return fi +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.log(name, level, msg, args...) +} + +// ImpliedArgs returns the loggers implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the loggers name +func (i *intLogger) Name() string { + return i.name +} + +// copy returns a shallow copy of the intLogger, replacing the level pointer +// when necessary +func (l *intLogger) copy() *intLogger { + sl := *l + + if l.independentLevels { + sl.level = new(int32) + *sl.level = *l.level + } + + return &sl +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 080ed79..83eafc1 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -5,7 +5,6 @@ import ( "log" "os" "strings" - "sync" ) var ( @@ -39,6 +38,9 @@ const ( // Error information about unrecoverable events. Error Level = 5 + + // Off disables all logging output. 
+ Off Level = 6 ) // Format is a simple convenience type for when formatting is required. When @@ -53,6 +55,33 @@ func Fmt(str string, args ...interface{}) Format { return append(Format{str}, args...) } +// A simple shortcut to format numbers in hex when displayed with the normal +// text output. For example: L.Info("header value", Hex(17)) +type Hex int + +// A simple shortcut to format numbers in octal when displayed with the normal +// text output. For example: L.Info("perms", Octal(17)) +type Octal int + +// A simple shortcut to format numbers in binary when displayed with the normal +// text output. For example: L.Info("bits", Binary(17)) +type Binary int + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + // LevelFromString returns a Level type for the named log level, or "NoLevel" if // the level string is invalid. This facilitates setting the log level via // config or environment variable by name in a predictable way. @@ -70,16 +99,42 @@ func LevelFromString(levelStr string) Level { return Warn case "error": return Error + case "off": + return Off default: return NoLevel } } +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case NoLevel: + return "none" + case Off: + return "off" + default: + return "unknown" + } +} + // Logger describes the interface that must be implemented by all loggers. type Logger interface { // Args are alternating key, val pairs // keys must be strings // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + // Emit a message and key/value pairs at the TRACE level Trace(msg string, args ...interface{}) @@ -111,9 +166,15 @@ type Logger interface { // Indicate if ERROR logs would be emitted. This and the other Is* guards IsError() bool + // ImpliedArgs returns With key/value pairs + ImpliedArgs() []interface{} + // Creates a sublogger that will always have the given key/value pairs With(args ...interface{}) Logger + // Returns the Name of the logger + Name() string + // Create a logger that will prepend the name string on the front of all messages. // If the logger already has a name, the new value will be appended to the current // name. That way, a major subsystem can use this to decorate all its own logs @@ -125,7 +186,8 @@ type Logger interface { // the current name as well. ResetNamed(name string) Logger - // Updates the level. This should affect all sub-loggers as well. If an + // Updates the level. This should affect all related loggers as well, + // unless they were created with IndependentLevels. If an // implementation cannot update the level on the fly, it should no-op. SetLevel(level Level) @@ -162,8 +224,10 @@ type LoggerOptions struct { // Where to write the logs to. Defaults to os.Stderr if nil Output io.Writer - // An optional mutex pointer in case Output is shared - Mutex *sync.Mutex + // An optional Locker in case Output is shared.
This can be a sync.Mutex or + a NoopLocker if the caller wants control over output, e.g. for batching + log lines. + Mutex Locker // Control if the output should be in JSON. JSONFormat bool @@ -173,4 +237,105 @@ type LoggerOptions struct { // The time format to use instead of the default TimeFormat string + + // Control whether or not to display the time at all. This is required + // because setting TimeFormat to empty assumes the default format. + DisableTime bool + + // Color the output. On Windows, colored logs are only available for io.Writers that + // are concretely instances of *os.File. + Color ColorOption + + // A function which is called with the log information and if it returns true the value + // should not be logged. + // This is useful when interacting with a system that you wish to suppress the log + // message for (because it's too noisy, etc) + Exclude func(level Level, msg string, args ...interface{}) bool + + // IndependentLevels causes subloggers to be created with an independent + // copy of this logger's level. This means that using SetLevel on this + // logger will not affect any subloggers, and SetLevel on any subloggers + // will not affect the parent or sibling loggers. + IndependentLevels bool } + +// InterceptLogger describes the interface for using a logger +// that can register different output sinks. +// This is useful for sending lower level log messages +// to a different output while keeping the root logger +// at a higher one. +type InterceptLogger interface { + // Logger is the root logger for an InterceptLogger + Logger + + // RegisterSink adds a SinkAdapter to the InterceptLogger + RegisterSink(sink SinkAdapter) + + // DeregisterSink removes a SinkAdapter from the InterceptLogger + DeregisterSink(sink SinkAdapter) + + // Create an interceptlogger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all its own logs + // without losing context. + NamedIntercept(name string) InterceptLogger + + // Create an interceptlogger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honors + // the current name as well. + ResetNamedIntercept(name string) InterceptLogger + + // Deprecated: use StandardLogger + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Deprecated: use StandardWriter + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer +} + +// SinkAdapter describes the interface that must be implemented +// in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) +} + +// Flushable represents a method for flushing an output buffer. It can be used +// when resetting the log to use a new output, in order to flush the writes to +// the existing output beforehand. +type Flushable interface { + Flush() error +} + +// OutputResettable provides ways to swap the output in use at runtime +type OutputResettable interface { + // ResetOutput swaps the current output writer with the one given in the + // opts. Color options given in opts will be used for the new output. + ResetOutput(opts *LoggerOptions) error + + // ResetOutputWithFlush swaps the current output writer with the one given + // in the opts, first calling Flush on the given Flushable.
Color options + // given in opts will be used for the new output. + ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error +} + +// Locker is used for locking output. If not set when creating a logger, a +// sync.Mutex will be used internally. +type Locker interface { + // Lock is called when the output is going to be changed or written to + Lock() + + // Unlock is called when the operation that called Lock() completes + Unlock() +} + +// NoopLocker implements locker but does nothing. This is useful if the client +// wants tight control over locking, in order to provide grouping of log +// entries or other functionality. +type NoopLocker struct{} + +// Lock does nothing +func (n NoopLocker) Lock() {} + +// Unlock does nothing +func (n NoopLocker) Unlock() {} + +var _ Locker = (*NoopLocker)(nil) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 7ad6b35..bc14f77 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -15,6 +15,8 @@ func NewNullLogger() Logger { type nullLogger struct{} +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + func (l *nullLogger) Trace(msg string, args ...interface{}) {} func (l *nullLogger) Debug(msg string, args ...interface{}) {} @@ -35,8 +37,12 @@ func (l *nullLogger) IsWarn() bool { return false } func (l *nullLogger) IsError() bool { return false } +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + func (l *nullLogger) With(args ...interface{}) Logger { return l } +func (l *nullLogger) Name() string { return "" } + func (l *nullLogger) Named(name string) Logger { return l } func (l *nullLogger) ResetNamed(name string) Logger { return l } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 044a469..f35d875 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -2,6 +2,7 @@ package hclog import ( "bytes" + "log" "strings" ) @@ -25,36 +26,10 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { _, str := s.pickLevel(str) // Log at the forced level - switch s.forceLevel { - case Trace: - s.log.Trace(str) - case Debug: - s.log.Debug(str) - case Info: - s.log.Info(str) - case Warn: - s.log.Warn(str) - case Error: - s.log.Error(str) - default: - s.log.Info(str) - } + s.dispatch(str, s.forceLevel) } else if s.inferLevels { level, str := s.pickLevel(str) - switch level { - case Trace: - s.log.Trace(str) - case Debug: - s.log.Debug(str) - case Info: - s.log.Info(str) - case Warn: - s.log.Warn(str) - case Error: - s.log.Error(str) - default: - s.log.Info(str) - } + s.dispatch(str, level) } else { s.log.Info(str) } @@ -62,6 +37,23 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { return len(data), nil } +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + // Detect, based on conventions, what log level this is. 
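Ahead of the level-detection helper that follows, here is a rough sketch of how the adapter above is usually reached from the standard library `log` package. Illustrative only, not part of the vendored diff: `StandardWriter` appears in this patch, while the `InferLevels` field name is assumed from the existing `StandardLoggerOptions`.

```go
package main

import (
	"log"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{Name: "app"})

	// StandardWriter returns an io.Writer backed by the stdlogAdapter above.
	// With InferLevels set, lines such as "[WARN] ..." are re-emitted at the
	// inferred level; ForceLevel would instead pin every line to one level.
	std := log.New(appLogger.StandardWriter(&hclog.StandardLoggerOptions{
		InferLevels: true, // assumed field name
	}), "", 0)

	std.Println("[WARN] disk space low")
}
```

The new `FromStandardLogger` helper added just below covers the opposite direction, wrapping an existing `*log.Logger` as an hclog `Logger`.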
func (s *stdlogAdapter) pickLevel(str string) (Level, string) { switch { @@ -81,3 +73,23 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { return Info, str } } + +type logWriter struct { + l *log.Logger +} + +func (l *logWriter) Write(b []byte) (int, error) { + l.l.Println(string(bytes.TrimRight(b, " \n\t"))) + return len(b), nil +} + +// Takes a standard library logger and returns a Logger that will write to it +func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger { + var dl LoggerOptions = *opts + + // Use the time format that log.Logger uses + dl.DisableTime = true + dl.Output = &logWriter{l} + + return New(&dl) +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go index 7e8ec72..421a1f0 100644 --- a/vendor/github.com/hashicorp/go-hclog/writer.go +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -6,19 +6,27 @@ import ( ) type writer struct { - b bytes.Buffer - w io.Writer + b bytes.Buffer + w io.Writer + color ColorOption } -func newWriter(w io.Writer) *writer { - return &writer{w: w} +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} } func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + if lw, ok := w.w.(LevelWriter); ok { - _, err = lw.LevelWrite(level, w.b.Bytes()) + _, err = lw.LevelWrite(level, unwritten) } else { - _, err = w.w.Write(w.b.Bytes()) + _, err = w.w.Write(unwritten) } w.b.Reset() return err diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 0000000..24b8038 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.x + +branches: + only: + - master + +script: env GO111MODULE=on make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 0000000..82b4de9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. 
“License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 0000000..b97cd6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 0000000..e92fa61 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,131 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. 
+ +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 0000000..775b6e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,41 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) 
+ } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 0000000..aab8e9a --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 0000000..47f13c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. +func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 0000000..0afe8e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-multierror + +go 1.14 + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 0000000..e8238e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,2 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 0000000..9c29efb --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. 
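Before the method bodies below, a brief caller-side sketch of how `Group` is meant to be used (illustrative only, not part of the vendored file; it relies on `Go`, `Wait`, and `ErrorOrNil` as defined in this package).

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var group multierror.Group

	for _, name := range []string{"a", "b", "c"} {
		name := name // capture the loop variable for the goroutine
		group.Go(func() error {
			return fmt.Errorf("task %s failed", name)
		})
	}

	// Wait blocks until every Go call has returned and yields the accumulated
	// *multierror.Error; ErrorOrNil collapses the "no errors" case to nil.
	if err := group.Wait().ErrorOrNil(); err != nil {
		fmt.Println(err)
	}
}
```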
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 0000000..d05dd92 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,118 @@ +package multierror + +import ( + "errors" + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the errwrap.Wrapper interface so that +// multierror.Error can be used with that library. +// +// This method is not safe to be called concurrently and is no different +// than accessing the Errors field directly. It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. 
+// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 0000000..5c477ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 0000000..fecb14e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index fe305ad..46ee09f 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -141,11 +141,6 @@ This plugin system will give host processes a system for constraining versions. This is in addition to the protocol versioning already present which is more for larger underlying changes. -**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) -to support automatic download + install of plugins. Paired with cryptographically -secure plugins (above), we can make this a safe operation for an amazing -user experience. - ## What About Shared Libraries? 
When we started using plugins (late 2012, early 2013), plugins over RPC diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index bc56559..780a312 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -159,11 +159,8 @@ type ClientConfig struct { // SyncStdout, SyncStderr can be set to override the // respective os.Std* values in the plugin. Care should be taken to - // avoid races here. If these are nil, then this will automatically be - // hooked up to os.Stdin, Stdout, and Stderr, respectively. - // - // If the default values (nil) are used, then this package will not - // sync any of these streams. + // avoid races here. If these are nil, then this will be set to + // ioutil.Discard. SyncStdout io.Writer SyncStderr io.Writer @@ -215,6 +212,12 @@ type ReattachConfig struct { Protocol Protocol Addr net.Addr Pid int + + // Test is set to true if this is reattaching to a plugin in "test mode" + // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the + // process and instead will rely on the plugin to terminate itself. This + // should not be used in non-test environments. + Test bool } // SecureConfig is used to configure a client to verify the integrity of an @@ -690,14 +693,14 @@ func (c *Client) Start() (addr net.Addr, err error) { // Check the core protocol. Wrapped in a {} for scoping. { - var coreProtocol int64 - coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + var coreProtocol int + coreProtocol, err = strconv.Atoi(parts[0]) if err != nil { err = fmt.Errorf("Error parsing core protocol version: %s", err) return } - if int(coreProtocol) != CoreProtocolVersion { + if coreProtocol != CoreProtocolVersion { err = fmt.Errorf("Incompatible core API version with plugin. "+ "Plugin version: %s, Core version: %d\n\n"+ "To fix this, the plugin usually only needs to be recompiled.\n"+ @@ -788,7 +791,10 @@ func (c *Client) reattach() (net.Addr, error) { // Verify the process still exists. If not, then it is an error p, err := os.FindProcess(c.config.Reattach.Pid) if err != nil { - return nil, err + // On Unix systems, FindProcess never returns an error. + // On Windows, for non-existent pids it returns: + // os.SyscallError - 'OpenProcess: the parameter is incorrect' + return nil, ErrProcessNotFound } // Attempt to connect to the addr since on Unix systems FindProcess @@ -825,15 +831,21 @@ func (c *Client) reattach() (net.Addr, error) { c.exited = true }(p.Pid) - // Set the address and process + // Set the address and protocol c.address = c.config.Reattach.Addr - c.process = p c.protocol = c.config.Reattach.Protocol if c.protocol == "" { // Default the protocol to net/rpc for backwards compatibility c.protocol = ProtocolNetRPC } + // If we're in test mode, we do NOT set the process. This avoids the + // process being killed (the only purpose we have for c.process), since + // in test mode the process is responsible for exiting on its own.
+ if !c.config.Reattach.Test { + c.process = p + } + return c.address, nil } diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod index f3ddf44..4e182e6 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.mod +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -1,17 +1,15 @@ module github.com/hashicorp/go-plugin +go 1.13 + require ( - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/protobuf v1.2.0 - github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/golang/protobuf v1.3.4 + github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/jhump/protoreflect v1.6.0 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 github.com/oklog/run v1.0.0 github.com/stretchr/testify v1.3.0 // indirect - golang.org/x/net v0.0.0-20180826012351-8a410e7b638d - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect - golang.org/x/text v0.3.0 // indirect - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect - google.golang.org/grpc v1.14.0 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + google.golang.org/grpc v1.27.1 ) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum index 21b14e9..5606204 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.sum +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -1,31 +1,87 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 
h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc 
v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index d0d0d8e..9781219 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -37,7 +37,6 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) - // Connect. Note the first parameter is unused because we use a custom // dialer that has the state to see the address. conn, err := grpc.Dial("unused", opts...) @@ -62,6 +61,13 @@ func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { go broker.Run() go brokerGRPCClient.StartStream() + // Start the stdio client + stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) + if err != nil { + return nil, err + } + go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) + cl := &GRPCClient{ Conn: conn, Plugins: c.config.Plugins, diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go index d3dbf1c..387628b 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -14,6 +14,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" ) // GRPCServiceName is the name of the service that the health check should @@ -51,9 +52,10 @@ type GRPCServer struct { Stdout io.Reader Stderr io.Reader - config GRPCServerConfig - server *grpc.Server - broker *GRPCBroker + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + stdioServer *grpcStdioServer logger hclog.Logger } @@ -73,6 +75,9 @@ func (s *GRPCServer) Init() error { GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + // Register the reflection service + reflection.Register(s.server) + // Register the broker service brokerServer := newGRPCBrokerServer() plugin.RegisterGRPCBrokerServer(s.server, brokerServer) @@ -80,11 +85,13 @@ func (s *GRPCServer) Init() error { go s.broker.Run() // Register the controller - controllerServer := &grpcControllerServer{ - server: s, - } + controllerServer := &grpcControllerServer{server: s} plugin.RegisterGRPCControllerServer(s.server, controllerServer) + // Register the stdio service + s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) + plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) + // Register all our plugins onto the gRPC server. 
for k, raw := range s.Plugins { p, ok := raw.(GRPCPlugin) diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go new file mode 100644 index 0000000..a582181 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -0,0 +1,207 @@ +package plugin + +import ( + "bufio" + "bytes" + "context" + "io" + + empty "github.com/golang/protobuf/ptypes/empty" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of +// stdio data. This is currently 1 KB for no reason other than that seems like +// enough (stdio data isn't that common) and is fairly low. +const grpcStdioBuffer = 1 * 1024 + +// grpcStdioServer implements the Stdio service and streams stdiout/stderr. +type grpcStdioServer struct { + stdoutCh <-chan []byte + stderrCh <-chan []byte +} + +// newGRPCStdioServer creates a new grpcStdioServer and starts the stream +// copying for the given out and err readers. +// +// This must only be called ONCE per srcOut, srcErr. +func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer { + stdoutCh := make(chan []byte) + stderrCh := make(chan []byte) + + // Begin copying the streams + go copyChan(log, stdoutCh, srcOut) + go copyChan(log, stderrCh, srcErr) + + // Construct our server + return &grpcStdioServer{ + stdoutCh: stdoutCh, + stderrCh: stderrCh, + } +} + +// StreamStdio streams our stdout/err as the response. +func (s *grpcStdioServer) StreamStdio( + _ *empty.Empty, + srv plugin.GRPCStdio_StreamStdioServer, +) error { + // Share the same data value between runs. Sending this over the wire + // marshals it so we can reuse this. + var data plugin.StdioData + + for { + // Read our data + select { + case data.Data = <-s.stdoutCh: + data.Channel = plugin.StdioData_STDOUT + + case data.Data = <-s.stderrCh: + data.Channel = plugin.StdioData_STDERR + + case <-srv.Context().Done(): + return nil + } + + // Not sure if this is possible, but if we somehow got here and + // we didn't populate any data at all, then just continue. + if len(data.Data) == 0 { + continue + } + + // Send our data to the client. + if err := srv.Send(&data); err != nil { + return err + } + } +} + +// grpcStdioClient wraps the stdio service as a client to copy +// the stdio data to output writers. +type grpcStdioClient struct { + log hclog.Logger + stdioClient plugin.GRPCStdio_StreamStdioClient +} + +// newGRPCStdioClient creates a grpcStdioClient. This will perform the +// initial connection to the stdio service. If the stdio service is unavailable +// then this will be a no-op. This allows this to work without error for +// plugins that don't support this. +func newGRPCStdioClient( + ctx context.Context, + log hclog.Logger, + conn *grpc.ClientConn, +) (*grpcStdioClient, error) { + client := plugin.NewGRPCStdioClient(conn) + + // Connect immediately to the endpoint + stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) + + // If we get an Unavailable or Unimplemented error, this means that the plugin isn't + // updated and linking to the latest version of go-plugin that supports + // this. We fall back to the previous behavior of just not syncing anything. 
+ if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { + log.Warn("stdio service not available, stdout/stderr syncing unavailable") + stdioClient = nil + err = nil + } + if err != nil { + return nil, err + } + + return &grpcStdioClient{ + log: log, + stdioClient: stdioClient, + }, nil +} + +// Run starts the loop that receives stdio data and writes it to the given +// writers. This blocks and should be run in a goroutine. +func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { + // This will be nil if stdio is not supported by the plugin + if c.stdioClient == nil { + c.log.Warn("stdio service unavailable, run will do nothing") + return + } + + for { + c.log.Trace("waiting for stdio data") + data, err := c.stdioClient.Recv() + if err != nil { + if err == io.EOF || + status.Code(err) == codes.Unavailable || + status.Code(err) == codes.Canceled || + status.Code(err) == codes.Unimplemented || + err == context.Canceled { + c.log.Debug("received EOF, stopping recv loop", "err", err) + return + } + + c.log.Error("error receiving data", "err", err) + return + } + + // Determine our output writer based on channel + var w io.Writer + switch data.Channel { + case plugin.StdioData_STDOUT: + w = stdout + + case plugin.StdioData_STDERR: + w = stderr + + default: + c.log.Warn("unknown channel, dropping", "channel", data.Channel) + continue + } + + // Write! In the event of an error we just continue. + if c.log.IsTrace() { + c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data)) + } + if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil { + c.log.Error("failed to copy all bytes", "err", err) + } + } +} + +// copyChan copies an io.Reader into a channel. +func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) { + bufsrc := bufio.NewReader(src) + + for { + // Make our data buffer. We allocate a new one per loop iteration + // so that we can send it over the channel. + var data [1024]byte + + // Read the data, this will block until data is available + n, err := bufsrc.Read(data[:]) + + // We have to check if we have data BEFORE err != nil. The bufio + // docs guarantee n == 0 on EOF but its better to be safe here. + if n > 0 { + // We have data! Send it on the channel. This will block if there + // is no reader on the other side. We expect that go-plugin will + // connect immediately to the stdio server to drain this so we want + // this block to happen for backpressure. + dst <- data[:n] + } + + // If we hit EOF we're done copying + if err == io.EOF { + log.Debug("stdio EOF, exiting copy loop") + return + } + + // Any other error we just exit the loop. We don't expect there to + // be errors since our use case for this is reading/writing from + // a in-process pipe (os.Pipe). + if err != nil { + log.Warn("error copying stdio data, stopping copy", "err", err) + return + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go index aa2fdc8..fb9d415 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -1,3 +1,3 @@ -//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. 
package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go index b6850aa..6bf1038 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -3,12 +3,13 @@ package plugin +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -35,17 +36,16 @@ func (m *ConnInfo) Reset() { *m = ConnInfo{} } func (m *ConnInfo) String() string { return proto.CompactTextString(m) } func (*ConnInfo) ProtoMessage() {} func (*ConnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_802e9beed3ec3b28, []int{0} + return fileDescriptor_grpc_broker_3322b07398605250, []int{0} } - func (m *ConnInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConnInfo.Unmarshal(m, b) } func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) } -func (m *ConnInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnInfo.Merge(m, src) +func (dst *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(dst, src) } func (m *ConnInfo) XXX_Size() int { return xxx_messageInfo_ConnInfo.Size(m) @@ -81,23 +81,6 @@ func init() { proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") } -func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } - -var fileDescriptor_802e9beed3ec3b28 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, - 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, - 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, - 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, - 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, - 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, - 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, - 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, - 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn @@ -201,3 +184,20 @@ var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ }, Metadata: "grpc_broker.proto", } + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } + +var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto index 3fa79e8..aa3df46 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -11,5 +11,3 @@ message ConnInfo { service GRPCBroker { rpc StartStream(stream ConnInfo) returns (stream ConnInfo); } - - diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go index 38b4204..3e39da9 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -3,12 +3,13 @@ package plugin +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -32,17 +33,16 @@ func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_23c2c7e42feab570, []int{0} + return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} } - func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } -func (m *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(m, src) +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) @@ -57,19 +57,6 @@ func init() { proto.RegisterType((*Empty)(nil), "plugin.Empty") } -func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } - -var fileDescriptor_23c2c7e42feab570 = []byte{ - // 108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, - 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, - 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, - 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -141,3 +128,18 @@ var _GRPCController_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "grpc_controller.proto", } + +func init() { + proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) +} + +var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go new file mode 100644 index 0000000..c8f9492 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_stdio.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StdioData_Channel int32 + +const ( + StdioData_INVALID StdioData_Channel = 0 + StdioData_STDOUT StdioData_Channel = 1 + StdioData_STDERR StdioData_Channel = 2 +) + +var StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", +} +var StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, +} + +func (x StdioData_Channel) String() string { + return proto.EnumName(StdioData_Channel_name, int32(x)) +} +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +type StdioData struct { + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StdioData) Reset() { *m = StdioData{} } +func (m *StdioData) String() string { return proto.CompactTextString(m) } +func (*StdioData) ProtoMessage() {} +func (*StdioData) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} +} +func (m *StdioData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StdioData.Unmarshal(m, b) +} +func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) +} +func (dst *StdioData) XXX_Merge(src proto.Message) { + xxx_messageInfo_StdioData.Merge(dst, src) +} +func (m *StdioData) XXX_Size() int { + return xxx_messageInfo_StdioData.Size(m) +} +func (m *StdioData) XXX_DiscardUnknown() { + xxx_messageInfo_StdioData.DiscardUnknown(m) +} + +var xxx_messageInfo_StdioData proto.InternalMessageInfo + +func (m *StdioData) GetChannel() StdioData_Channel { + if m != nil { + return m.Channel + } + return StdioData_INVALID +} + +func (m *StdioData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*StdioData)(nil), "plugin.StdioData") + proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCStdioClient is the client API for GRPCStdio service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc *grpc.ClientConn +} + +func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) + if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
+ StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error +} + +func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { + s.RegisterService(&_GRPCStdio_serviceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc_stdio.proto", +} + +func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) } + +var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ + // 221 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, + 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, + 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, + 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, + 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, + 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, + 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, + 0xd8, 0x3d, 0xfd, 0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, + 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, + 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, + 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, + 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto new file mode 100644 index 0000000..ce1a122 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +import "google/protobuf/empty.proto"; + +// GRPCStdio is a service that is automatically run by the plugin process +// to stream any stdout/err data so that it can be mirrored on the plugin +// host side. +service GRPCStdio { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
+ rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +message StdioData { + enum Channel { + INVALID = 0; + STDOUT = 1; + STDERR = 2; + } + + Channel channel = 1; + bytes data = 2; +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 4c230e3..80f0ac3 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,13 +1,14 @@ package plugin import ( + "context" "crypto/tls" "crypto/x509" "encoding/base64" "errors" "fmt" + "io" "io/ioutil" - "log" "net" "os" "os/signal" @@ -15,10 +16,8 @@ import ( "sort" "strconv" "strings" - "sync/atomic" - - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" "google.golang.org/grpc" ) @@ -85,6 +84,51 @@ type ServeConfig struct { // Logger is used to pass a logger into the server. If none is provided the // server will create a default logger. Logger hclog.Logger + + // Test, if non-nil, will put plugin serving into "test mode". This is + // meant to be used as part of `go test` within a plugin's codebase to + // launch the plugin in-process and output a ReattachConfig. + // + // This changes the behavior of the server in a number of ways to + // accomodate the expectation of running in-process: + // + // * The handshake cookie is not validated. + // * Stdout/stderr will receive plugin reads and writes + // * Connection information will not be sent to stdout + // + Test *ServeTestConfig +} + +// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. +type ServeTestConfig struct { + // Context, if set, will force the plugin serving to end when cancelled. + // This is only a test configuration because the non-test configuration + // expects to take over the process and therefore end on an interrupt or + // kill signal. For tests, we need to kill the plugin serving routinely + // and this provides a way to do so. + // + // If you want to wait for the plugin process to close before moving on, + // you can wait on CloseCh. + Context context.Context + + // If this channel is non-nil, we will send the ReattachConfig via + // this channel. This can be encoded (via JSON recommended) to the + // plugin client to attach to this plugin. + ReattachConfigCh chan<- *ReattachConfig + + // CloseCh, if non-nil, will be closed when serving exits. This can be + // used along with Context to determine when the server is fully shut down. + // If this is not set, you can still use Context on its own, but note there + // may be a period of time between canceling the context and the plugin + // server being shut down. + CloseCh chan<- struct{} + + // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" + // functionality to work. This defaults to false because the implementation + // of making this work within test environments is particularly messy + // and SyncStdio functionality is fairly rare, so we default to the simple + // scenario. + SyncStdio bool } // protocolVersion determines the protocol version and plugin set to be used by @@ -169,35 +213,52 @@ func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { // Serve serves the plugins given by ServeConfig. // // Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to os.Stderr. +// fixable errors will be output to os.Stderr and the process will +// exit with a status code of 1. 
Serve will panic for unexpected +// conditions where a user's fix is unknown. // // This is the method that plugins should call in their main() functions. func Serve(opts *ServeConfig) { - // Validate the handshake config - if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { - fmt.Fprintf(os.Stderr, - "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ - "key or value was set. Please notify the plugin author and report\n"+ - "this as a bug.\n") - os.Exit(1) - } + exitCode := -1 + // We use this to trigger an `os.Exit` so that we can execute our other + // deferred functions. In test mode, we just output the err to stderr + // and return. + defer func() { + if opts.Test == nil && exitCode >= 0 { + os.Exit(exitCode) + } - // First check the cookie - if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a plugin. These are not meant to be executed directly.\n"+ - "Please execute the program that consumes these plugins, which will\n"+ - "load any plugins automatically\n") - os.Exit(1) + if opts.Test != nil && opts.Test.CloseCh != nil { + close(opts.Test.CloseCh) + } + }() + + if opts.Test == nil { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + exitCode = 1 + return + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + exitCode = 1 + return + } } // negotiate the version and plugins // start with default version in the handshake config protoVersion, protoType, pluginSet := protocolVersion(opts) - // Logging goes to the original stderr - log.SetOutput(os.Stderr) - logger := opts.Logger if logger == nil { // internal logger to os.Stderr @@ -208,19 +269,6 @@ func Serve(opts *ServeConfig) { }) } - // Create our new stdout, stderr files. These will override our built-in - // stdout/stderr so that it works across the stream boundary. - stdout_r, stdout_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - stderr_r, stderr_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - // Register a listener so we can accept a connection listener, err := serverListener() if err != nil { @@ -281,6 +329,33 @@ func Serve(opts *ServeConfig) { // Create the channel to tell us when we're done doneCh := make(chan struct{}) + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + var stdout_r, stderr_r io.Reader + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // If we're in test mode, we tee off the reader and write the data + // as-is to our normal Stdout and Stderr so that they continue working + // while stdio works. 
This is because in test mode, we assume we're running + // in `go test` or some equivalent and we want output to go to standard + // locations. + if opts.Test != nil { + // TODO(mitchellh): This isn't super ideal because a TeeReader + // only works if the reader side is actively read. If we never + // connect via a plugin client, the output still gets swallowed. + stdout_r = io.TeeReader(stdout_r, os.Stdout) + stderr_r = io.TeeReader(stderr_r, os.Stderr) + } + // Build the server type var server ServerProtocol switch protoType { @@ -323,35 +398,96 @@ func Serve(opts *ServeConfig) { logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - // Output the address and service name to stdout so that the client can bring it up. - fmt.Printf("%d|%d|%s|%s|%s|%s\n", - CoreProtocolVersion, - protoVersion, - listener.Addr().Network(), - listener.Addr().String(), - protoType, - serverCert) - os.Stdout.Sync() - - // Eat the interrupts - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - var count int32 = 0 - for { - <-ch - newCount := atomic.AddInt32(&count, 1) - logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + // Output the address and service name to stdout so that the client can + // bring it up. In test mode, we don't do this because clients will + // attach via a reattach config. + if opts.Test == nil { + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + } else if ch := opts.Test.ReattachConfigCh; ch != nil { + // Send back the reattach config that can be used. This isn't + // quite ready if they connect immediately but the client should + // retry a few times. + ch <- &ReattachConfig{ + Protocol: protoType, + Addr: listener.Addr(), + Pid: os.Getpid(), + Test: true, } - }() + } + + // Eat the interrupts. In test mode we disable this so that go test + // can be cancelled properly. + if opts.Test == nil { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + count := 0 + for { + <-ch + count++ + logger.Trace("plugin received interrupt signal, ignoring", "count", count) + } + }() + } - // Set our new out, err - os.Stdout = stdout_w - os.Stderr = stderr_w + // Set our stdout, stderr to the stdio stream that clients can retrieve + // using ClientConfig.SyncStdout/err. We only do this for non-test mode + // or if the test mode explicitly requests it. + // + // In test mode, we use a multiwriter so that the data continues going + // to the normal stdout/stderr so output can show up in test logs. We + // also send to the stdio stream so that clients can continue working + // if they depend on that. + if opts.Test == nil || opts.Test.SyncStdio { + if opts.Test != nil { + // In test mode we need to maintain the original values so we can + // reset it. + defer func(out, err *os.File) { + os.Stdout = out + os.Stderr = err + }(os.Stdout, os.Stderr) + } + os.Stdout = stdout_w + os.Stderr = stderr_w + } // Accept connections and wait for completion go server.Serve(listener) - <-doneCh + + ctx := context.Background() + if opts.Test != nil && opts.Test.Context != nil { + ctx = opts.Test.Context + } + select { + case <-ctx.Done(): + // Cancellation. We can stop the server by closing the listener. + // This isn't graceful at all but this is currently only used by + // tests and its our only way to stop. 
+ listener.Close() + + // If this is a grpc server, then we also ask the server itself to + // end which will kill all connections. There isn't an easy way to do + // this for net/rpc currently but net/rpc is more and more unused. + if s, ok := server.(*GRPCServer); ok { + s.Stop() + } + + // Wait for the server itself to shut down + <-doneCh + + case <-doneCh: + // Note that given the documentation of Serve we should probably be + // setting exitCode = 0 and using os.Exit here. That's how it used to + // work before extracting this library. However, for years we've done + // this so we'll keep this functionality. + } } func serverListener() (net.Listener, error) { @@ -390,7 +526,7 @@ func serverListener_tcp() (net.Listener, error) { } if minPort > maxPort { - return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) } for port := minPort; port <= maxPort; port++ { diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index 2cf2c26..e36f2eb 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -7,9 +7,9 @@ import ( "net" "net/rpc" - "github.com/mitchellh/go-testing-interface" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/mitchellh/go-testing-interface" "google.golang.org/grpc" ) diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go index be6ebca..f6a0019 100644 --- a/vendor/github.com/hashicorp/yamux/addr.go +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -54,7 +54,7 @@ func (s *Stream) LocalAddr() net.Addr { return s.session.LocalAddr() } -// LocalAddr returns the remote address +// RemoteAddr returns the remote address func (s *Stream) RemoteAddr() net.Addr { return s.session.RemoteAddr() } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index aa23919..e951c22 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -446,15 +446,17 @@ func (s *Stream) SetDeadline(t time.Time) error { return nil } -// SetReadDeadline sets the deadline for future Read calls. +// SetReadDeadline sets the deadline for blocked and future Read calls. 
func (s *Stream) SetReadDeadline(t time.Time) error { s.readDeadline.Store(t) + asyncNotify(s.recvNotifyCh) return nil } -// SetWriteDeadline sets the deadline for future Write calls +// SetWriteDeadline sets the deadline for blocked and future Write calls func (s *Stream) SetWriteDeadline(t time.Time) error { s.writeDeadline.Store(t) + asyncNotify(s.sendNotifyCh) return nil } diff --git a/vendor/github.com/mattermost/ldap/.travis.yml b/vendor/github.com/mattermost/ldap/.travis.yml index bb899b2..f2538fd 100644 --- a/vendor/github.com/mattermost/ldap/.travis.yml +++ b/vendor/github.com/mattermost/ldap/.travis.yml @@ -9,18 +9,19 @@ go: - "1.10.x" - "1.11.x" - "1.12.x" + - "1.13.x" - tip git: - depth: 1 + depth: 1 matrix: fast_finish: true allow_failures: - go: tip -go_import_path: gopkg.in/ldap.v3 +go_import_path: github.com/go-ldap/ldap install: - - go get gopkg.in/asn1-ber.v1 + - go get github.com/go-asn1-ber/asn1-ber - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover - go get github.com/golang/lint/golint || go get golang.org/x/lint/golint || true - go build -v ./... diff --git a/vendor/github.com/mattermost/ldap/README.md b/vendor/github.com/mattermost/ldap/README.md index 25cf730..c61e488 100644 --- a/vendor/github.com/mattermost/ldap/README.md +++ b/vendor/github.com/mattermost/ldap/README.md @@ -1,22 +1,8 @@ -[![GoDoc](https://godoc.org/gopkg.in/ldap.v3?status.svg)](https://godoc.org/gopkg.in/ldap.v3) +[![GoDoc](https://godoc.org/github.com/go-ldap/ldap?status.svg)](https://godoc.org/github.com/go-ldap/ldap) [![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap) # Basic LDAP v3 functionality for the GO programming language. -## Install - -For the latest version use: - - go get gopkg.in/ldap.v3 - -Import the latest version with: - - import "gopkg.in/ldap.v3" - -## Required Libraries: - - - gopkg.in/asn1-ber.v1 - ## Features: - Connecting to LDAP server (non-TLS, TLS, STARTTLS) @@ -34,6 +20,27 @@ Import the latest version with: - search - modify +## Go Modules: + +`go get github.com/go-ldap/ldap/v3` + +As go-ldap was v2+ when Go Modules came out, updating to Go Modules would be considered a breaking change. + +To maintain backwards compatability, we ultimately decided to use subfolders (as v3 was already a branch). +Whilst this duplicates the code, we can move toward implementing a backwards-compatible versioning system that allows for code reuse. +The alternative would be to increment the version number, however we believe that this would confuse users as v3 is in line with LDAPv3 (RFC-4511) +https://tools.ietf.org/html/rfc4511 + + +For more info, please visit the pull request that updated to modules. +https://github.com/go-ldap/ldap/pull/247 + +To install with `GOMODULE111=off`, use `go get github.com/go-ldap/ldap` +https://golang.org/cmd/go/#hdr-Legacy_GOPATH_go_get + +As always, we are looking for contributors with great ideas on how to best move forward. + + ## Contributing: Bug reports and pull requests are welcome! 
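For readers following the README change above, here is a minimal usage sketch (illustrative, not part of this patch) of what consuming the module-aware v3 import path looks like. The import path follows the README's `go get github.com/go-ldap/ldap/v3` instruction, and the `DialURL`/`Bind` calls follow the client API shown later in this patch; the server URL and credentials are placeholders.

```go
// Minimal sketch, assuming the go-ldap/ldap v3 module path described in the
// README above. The URL and bind credentials below are placeholders only.
package main

import (
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

func main() {
	// DialURL accepts ldap://, ldaps:// and (for ExternalBind) ldapi:// URLs.
	conn, err := ldap.DialURL("ldap://localhost:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Simple bind, one of the features listed in the README.
	if err := conn.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}
}
```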
diff --git a/vendor/github.com/mattermost/ldap/add.go b/vendor/github.com/mattermost/ldap/add.go index 19bce1b..a3e6b88 100644 --- a/vendor/github.com/mattermost/ldap/add.go +++ b/vendor/github.com/mattermost/ldap/add.go @@ -10,10 +10,9 @@ package ldap import ( - "errors" "log" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // Attribute represents an LDAP attribute @@ -45,20 +44,26 @@ type AddRequest struct { Controls []Control } -func (a AddRequest) encode() *ber.Packet { - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") - request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN")) +func (req *AddRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") - for _, attribute := range a.Attributes { + for _, attribute := range req.Attributes { attributes.AppendChild(attribute.encode()) } - request.AppendChild(attributes) - return request + pkt.AppendChild(attributes) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil } // Attribute adds an attribute with the given type and values -func (a *AddRequest) Attribute(attrType string, attrVals []string) { - a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals}) +func (req *AddRequest) Attribute(attrType string, attrVals []string) { + req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals}) } // NewAddRequest returns an AddRequest for the given DN, with no attributes @@ -72,39 +77,17 @@ func NewAddRequest(dn string, controls []Control) *AddRequest { // Add performs the given AddRequest func (l *Conn) Add(addRequest *AddRequest) error { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - packet.AppendChild(addRequest.encode()) - if len(addRequest.Controls) > 0 { - packet.AppendChild(encodeControls(addRequest.Controls)) - } - - l.Debug.PrintPacket(packet) - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(addRequest) if err != nil { return err } defer l.finishMessage(msgCtx) - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := <-msgCtx.responses - if !ok { - return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return err } - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return err - } - ber.PrintPacket(packet) - } - if packet.Children[1].Tag == ApplicationAddResponse { err := GetLDAPError(packet) if err != nil { @@ -113,7 +96,5 @@ func (l *Conn) Add(addRequest *AddRequest) error { } else { log.Printf("Unexpected Response: %d", packet.Children[1].Tag) } - - l.Debug.Printf("%d: returning", msgCtx.id) return nil } diff --git a/vendor/github.com/mattermost/ldap/bind.go b/vendor/github.com/mattermost/ldap/bind.go index 59c3f5e..15307f5 100644 --- 
a/vendor/github.com/mattermost/ldap/bind.go +++ b/vendor/github.com/mattermost/ldap/bind.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // SimpleBindRequest represents a username/password bind operation @@ -35,13 +35,18 @@ func NewSimpleBindRequest(username string, password string, controls []Control) } } -func (bindRequest *SimpleBindRequest) encode() *ber.Packet { - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") - request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) - request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name")) - request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password")) +func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name")) + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password")) - return request + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil } // SimpleBind performs the simple bind operation defined in the given request @@ -50,41 +55,17 @@ func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResu return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) } - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - encodedBindRequest := simpleBindRequest.encode() - packet.AppendChild(encodedBindRequest) - if len(simpleBindRequest.Controls) > 0 { - packet.AppendChild(encodeControls(simpleBindRequest.Controls)) - } - - if l.Debug { - ber.PrintPacket(packet) - } - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(simpleBindRequest) if err != nil { return nil, err } defer l.finishMessage(msgCtx) - packetResponse, ok := <-msgCtx.responses - if !ok { - return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return nil, err } - if l.Debug { - if err = addLDAPDescriptions(packet); err != nil { - return nil, err - } - ber.PrintPacket(packet) - } - result := &SimpleBindResult{ Controls: make([]Control, 0), } @@ -133,3 +114,39 @@ func (l *Conn) UnauthenticatedBind(username string) error { _, err := l.SimpleBind(req) return err } + +var externalBindRequest = requestFunc(func(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name")) + + saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", 
"authentication") + saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech")) + saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred")) + + pkt.AppendChild(saslAuth) + + envelope.AppendChild(pkt) + + return nil +}) + +// ExternalBind performs SASL/EXTERNAL authentication. +// +// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind. +// +// See https://tools.ietf.org/html/rfc4422#appendix-A +func (l *Conn) ExternalBind() error { + msgCtx, err := l.doRequest(externalBindRequest) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packet, err := l.readPacket(msgCtx) + if err != nil { + return err + } + + return GetLDAPError(packet) +} diff --git a/vendor/github.com/mattermost/ldap/client.go b/vendor/github.com/mattermost/ldap/client.go index c7f41f6..619677c 100644 --- a/vendor/github.com/mattermost/ldap/client.go +++ b/vendor/github.com/mattermost/ldap/client.go @@ -8,21 +8,23 @@ import ( // Client knows how to interact with an LDAP server type Client interface { Start() - StartTLS(config *tls.Config) error + StartTLS(*tls.Config) error Close() SetTimeout(time.Duration) Bind(username, password string) error - SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) + UnauthenticatedBind(username string) error + SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error) + ExternalBind() error - Add(addRequest *AddRequest) error - Del(delRequest *DelRequest) error - Modify(modifyRequest *ModifyRequest) error - ModifyDN(modifyDNRequest *ModifyDNRequest) error + Add(*AddRequest) error + Del(*DelRequest) error + Modify(*ModifyRequest) error + ModifyDN(*ModifyDNRequest) error Compare(dn, attribute, value string) (bool, error) - PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) + PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error) - Search(searchRequest *SearchRequest) (*SearchResult, error) + Search(*SearchRequest) (*SearchResult, error) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) } diff --git a/vendor/github.com/mattermost/ldap/compare.go b/vendor/github.com/mattermost/ldap/compare.go index 5b5013c..04a2e17 100644 --- a/vendor/github.com/mattermost/ldap/compare.go +++ b/vendor/github.com/mattermost/ldap/compare.go @@ -20,53 +20,50 @@ package ldap import ( - "errors" "fmt" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) -// Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise -// false with any error that occurs if any. -func (l *Conn) Compare(dn, attribute, value string) (bool, error) { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) +// CompareRequest represents an LDAP CompareRequest operation. 
+type CompareRequest struct { + DN string + Attribute string + Value string +} - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") - request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN")) +func (req *CompareRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion") - ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc")) - ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "AssertionValue")) - request.AppendChild(ava) - packet.AppendChild(request) + ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc")) + ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue")) - l.Debug.PrintPacket(packet) + pkt.AppendChild(ava) - msgCtx, err := l.sendMessage(packet) + envelope.AppendChild(pkt) + + return nil +} + +// Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise +// false with any error that occurs if any. +func (l *Conn) Compare(dn, attribute, value string) (bool, error) { + msgCtx, err := l.doRequest(&CompareRequest{ + DN: dn, + Attribute: attribute, + Value: value}) if err != nil { return false, err } defer l.finishMessage(msgCtx) - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := <-msgCtx.responses - if !ok { - return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return false, err } - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return false, err - } - ber.PrintPacket(packet) - } - if packet.Children[1].Tag == ApplicationCompareResponse { err := GetLDAPError(packet) diff --git a/vendor/github.com/mattermost/ldap/conn.go b/vendor/github.com/mattermost/ldap/conn.go index c20471f..9e55a5d 100644 --- a/vendor/github.com/mattermost/ldap/conn.go +++ b/vendor/github.com/mattermost/ldap/conn.go @@ -11,7 +11,7 @@ import ( "sync/atomic" "time" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) const ( @@ -120,7 +120,6 @@ func Dial(network, addr string) (*Conn, error) { return nil, NewError(ErrorNetwork, err) } conn := NewConn(c, false) - conn.Start() return conn, nil } @@ -132,7 +131,6 @@ func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { return nil, NewError(ErrorNetwork, err) } conn := NewConn(c, true) - conn.Start() return conn, nil } @@ -140,7 +138,6 @@ func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { // or ldap:// specified as protocol. On success a new Conn for the connection // is returned. 
func DialURL(addr string) (*Conn, error) { - lurl, err := url.Parse(addr) if err != nil { return nil, NewError(ErrorNetwork, err) @@ -154,6 +151,11 @@ func DialURL(addr string) (*Conn, error) { } switch lurl.Scheme { + case "ldapi": + if lurl.Path == "" || lurl.Path == "/" { + lurl.Path = "/var/run/slapd/ldapi" + } + return Dial("unix", lurl.Path) case "ldap": if port == "" { port = DefaultLdapPort @@ -187,9 +189,9 @@ func NewConn(conn net.Conn, isTLS bool) *Conn { // Start initializes goroutines to read responses and process messages func (l *Conn) Start() { + l.wgClose.Add(1) go l.reader() go l.processMessages() - l.wgClose.Add(1) } // IsClosing returns whether or not we're currently closing. @@ -274,7 +276,7 @@ func (l *Conn) StartTLS(config *tls.Config) error { l.Close() return err } - ber.PrintPacket(packet) + l.Debug.PrintPacket(packet) } if err := GetLDAPError(packet); err == nil { @@ -447,7 +449,7 @@ func (l *Conn) processMessages() { msgCtx.sendResponse(&PacketResponse{message.Packet, nil}) } else { log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing()) - ber.PrintPacket(message.Packet) + l.Debug.PrintPacket(message.Packet) } case MessageTimeout: // Handle the timeout by closing the channel @@ -490,11 +492,13 @@ func (l *Conn) reader() { // A read error is expected here if we are closing the connection... if !l.IsClosing() { l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err)) - l.Debug.Printf("reader error: %s", err.Error()) + l.Debug.Printf("reader error: %s", err) } return } - addLDAPDescriptions(packet) + if err := addLDAPDescriptions(packet); err != nil { + l.Debug.Printf("descriptions error: %s", err) + } if len(packet.Children) == 0 { l.Debug.Printf("Received bad ldap packet") continue diff --git a/vendor/github.com/mattermost/ldap/control.go b/vendor/github.com/mattermost/ldap/control.go index 3f18191..463fe3a 100644 --- a/vendor/github.com/mattermost/ldap/control.go +++ b/vendor/github.com/mattermost/ldap/control.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "gopkg.in/asn1-ber.v1" + "github.com/go-asn1-ber/asn1-ber" ) const ( diff --git a/vendor/github.com/mattermost/ldap/debug.go b/vendor/github.com/mattermost/ldap/debug.go index 7279fc2..dfddd5f 100644 --- a/vendor/github.com/mattermost/ldap/debug.go +++ b/vendor/github.com/mattermost/ldap/debug.go @@ -1,24 +1,37 @@ package ldap import ( + "bytes" "log" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) +const LDAP_TRACE_PREFIX = "ldap-trace: " + // debugging type // - has a Printf method to write the debug output type debugging bool -// write debug output +// Enable controls debugging mode. +func (debug *debugging) Enable(b bool) { + *debug = debugging(b) +} + +// Printf writes debug output. func (debug debugging) Printf(format string, args ...interface{}) { if debug { + format = LDAP_TRACE_PREFIX + format log.Printf(format, args...) } } +// PrintPacket dumps a packet. 
func (debug debugging) PrintPacket(packet *ber.Packet) { if debug { - ber.PrintPacket(packet) + var b bytes.Buffer + ber.WritePacket(&b, packet) + textToPrint := LDAP_TRACE_PREFIX + b.String() + log.Printf(textToPrint) } } diff --git a/vendor/github.com/mattermost/ldap/del.go b/vendor/github.com/mattermost/ldap/del.go index 6f78beb..49811d3 100644 --- a/vendor/github.com/mattermost/ldap/del.go +++ b/vendor/github.com/mattermost/ldap/del.go @@ -6,10 +6,9 @@ package ldap import ( - "errors" "log" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // DelRequest implements an LDAP deletion request @@ -20,15 +19,20 @@ type DelRequest struct { Controls []Control } -func (d DelRequest) encode() *ber.Packet { - request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request") - request.Data.Write([]byte(d.DN)) - return request +func (req *DelRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request") + pkt.Data.Write([]byte(req.DN)) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil } // NewDelRequest creates a delete request for the given DN and controls -func NewDelRequest(DN string, - Controls []Control) *DelRequest { +func NewDelRequest(DN string, Controls []Control) *DelRequest { return &DelRequest{ DN: DN, Controls: Controls, @@ -37,39 +41,17 @@ func NewDelRequest(DN string, // Del executes the given delete request func (l *Conn) Del(delRequest *DelRequest) error { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - packet.AppendChild(delRequest.encode()) - if len(delRequest.Controls) > 0 { - packet.AppendChild(encodeControls(delRequest.Controls)) - } - - l.Debug.PrintPacket(packet) - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(delRequest) if err != nil { return err } defer l.finishMessage(msgCtx) - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := <-msgCtx.responses - if !ok { - return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return err } - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return err - } - ber.PrintPacket(packet) - } - if packet.Children[1].Tag == ApplicationDelResponse { err := GetLDAPError(packet) if err != nil { @@ -78,7 +60,5 @@ func (l *Conn) Del(delRequest *DelRequest) error { } else { log.Printf("Unexpected Response: %d", packet.Children[1].Tag) } - - l.Debug.Printf("%d: returning", msgCtx.id) return nil } diff --git a/vendor/github.com/mattermost/ldap/dn.go b/vendor/github.com/mattermost/ldap/dn.go index f89e73a..9d32e7f 100644 --- a/vendor/github.com/mattermost/ldap/dn.go +++ b/vendor/github.com/mattermost/ldap/dn.go @@ -48,7 +48,7 @@ import ( "fmt" "strings" - "gopkg.in/asn1-ber.v1" + "github.com/go-asn1-ber/asn1-ber" ) // AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 diff --git a/vendor/github.com/mattermost/ldap/error.go b/vendor/github.com/mattermost/ldap/error.go index 639ed82..b1fda2d 100644 --- a/vendor/github.com/mattermost/ldap/error.go +++ 
b/vendor/github.com/mattermost/ldap/error.go @@ -3,7 +3,7 @@ package ldap import ( "fmt" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // LDAP Result Codes @@ -196,7 +196,9 @@ func (e *Error) Error() string { func GetLDAPError(packet *ber.Packet) error { if packet == nil { return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")} - } else if len(packet.Children) >= 2 { + } + + if len(packet.Children) >= 2 { response := packet.Children[1] if response == nil { return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")} diff --git a/vendor/github.com/mattermost/ldap/filter.go b/vendor/github.com/mattermost/ldap/filter.go index 4cc4207..a387550 100644 --- a/vendor/github.com/mattermost/ldap/filter.go +++ b/vendor/github.com/mattermost/ldap/filter.go @@ -8,7 +8,7 @@ import ( "strings" "unicode/utf8" - "gopkg.in/asn1-ber.v1" + "github.com/go-asn1-ber/asn1-ber" ) // Filter choices diff --git a/vendor/github.com/mattermost/ldap/go.mod b/vendor/github.com/mattermost/ldap/go.mod new file mode 100644 index 0000000..8c15079 --- /dev/null +++ b/vendor/github.com/mattermost/ldap/go.mod @@ -0,0 +1,5 @@ +module github.com/mattermost/ldap + +require github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3 + +go 1.13 diff --git a/vendor/github.com/mattermost/ldap/go.sum b/vendor/github.com/mattermost/ldap/go.sum new file mode 100644 index 0000000..5eb88fe --- /dev/null +++ b/vendor/github.com/mattermost/ldap/go.sum @@ -0,0 +1,4 @@ +github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3 h1:QW2p25fGTu/S0MvEftCo3wV7aEFHBt2m1DTg1HUwh+o= +github.com/go-asn1-ber/asn1-ber v1.3.2-0.20191121212151-29be175fc3a3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= diff --git a/vendor/github.com/mattermost/ldap/ldap.go b/vendor/github.com/mattermost/ldap/ldap.go index e6155d5..7dbc951 100644 --- a/vendor/github.com/mattermost/ldap/ldap.go +++ b/vendor/github.com/mattermost/ldap/ldap.go @@ -1,12 +1,11 @@ package ldap import ( - "errors" "fmt" "io/ioutil" "os" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // LDAP Application Codes @@ -87,7 +86,7 @@ var BeheraPasswordPolicyErrorMap = map[int8]string{ func addLDAPDescriptions(packet *ber.Packet) (err error) { defer func() { if r := recover(); r != nil { - err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions")) + err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r)) } }() packet.Description = "LDAP Response" diff --git a/vendor/github.com/mattermost/ldap/moddn.go b/vendor/github.com/mattermost/ldap/moddn.go index 803279d..ca48b2b 100644 --- a/vendor/github.com/mattermost/ldap/moddn.go +++ b/vendor/github.com/mattermost/ldap/moddn.go @@ -11,10 +11,9 @@ package ldap import ( - "errors" "log" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // ModifyDNRequest holds the request to modify a DN @@ -46,50 +45,34 @@ func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *Modi } } -func (m ModifyDNRequest) encode() *ber.Packet { - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request") - request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) 
- request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.NewRDN, "New RDN")) - request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, m.DeleteOldRDN, "Delete old RDN")) - if m.NewSuperior != "" { - request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, m.NewSuperior, "New Superior")) +func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN")) + pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN")) + if req.NewSuperior != "" { + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior")) } - return request + + envelope.AppendChild(pkt) + + return nil } // ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument // to NewModifyDNRequest() is not ""). func (l *Conn) ModifyDN(m *ModifyDNRequest) error { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - packet.AppendChild(m.encode()) - - l.Debug.PrintPacket(packet) - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(m) if err != nil { return err } defer l.finishMessage(msgCtx) - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := <-msgCtx.responses - if !ok { - return NewError(ErrorNetwork, errors.New("ldap: channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return err } - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return err - } - ber.PrintPacket(packet) - } - if packet.Children[1].Tag == ApplicationModifyDNResponse { err := GetLDAPError(packet) if err != nil { @@ -98,7 +81,5 @@ func (l *Conn) ModifyDN(m *ModifyDNRequest) error { } else { log.Printf("Unexpected Response: %d", packet.Children[1].Tag) } - - l.Debug.Printf("%d: returning", msgCtx.id) return nil } diff --git a/vendor/github.com/mattermost/ldap/modify.go b/vendor/github.com/mattermost/ldap/modify.go index d83e622..88f65c1 100644 --- a/vendor/github.com/mattermost/ldap/modify.go +++ b/vendor/github.com/mattermost/ldap/modify.go @@ -26,10 +26,9 @@ package ldap import ( - "errors" "log" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // Change operation choices @@ -84,40 +83,43 @@ type ModifyRequest struct { } // Add appends the given attribute to the list of changes to be made -func (m *ModifyRequest) Add(attrType string, attrVals []string) { - m.appendChange(AddAttribute, attrType, attrVals) +func (req *ModifyRequest) Add(attrType string, attrVals []string) { + req.appendChange(AddAttribute, attrType, attrVals) } // Delete appends the given attribute to the list of changes to be made -func (m *ModifyRequest) Delete(attrType string, attrVals []string) { - m.appendChange(DeleteAttribute, attrType, attrVals) +func (req *ModifyRequest) Delete(attrType string, attrVals []string) { + req.appendChange(DeleteAttribute, 
attrType, attrVals) } // Replace appends the given attribute to the list of changes to be made -func (m *ModifyRequest) Replace(attrType string, attrVals []string) { - m.appendChange(ReplaceAttribute, attrType, attrVals) +func (req *ModifyRequest) Replace(attrType string, attrVals []string) { + req.appendChange(ReplaceAttribute, attrType, attrVals) } -func (m *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) { - m.Changes = append(m.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}}) +func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) { + req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}}) } -func (m ModifyRequest) encode() *ber.Packet { - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") - request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) +func (req *ModifyRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN")) changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes") - for _, change := range m.Changes { + for _, change := range req.Changes { changes.AppendChild(change.encode()) } - request.AppendChild(changes) - return request + pkt.AppendChild(changes) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil } // NewModifyRequest creates a modify request for the given DN -func NewModifyRequest( - dn string, - controls []Control, -) *ModifyRequest { +func NewModifyRequest(dn string, controls []Control) *ModifyRequest { return &ModifyRequest{ DN: dn, Controls: controls, @@ -126,39 +128,17 @@ func NewModifyRequest( // Modify performs the ModifyRequest func (l *Conn) Modify(modifyRequest *ModifyRequest) error { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - packet.AppendChild(modifyRequest.encode()) - if len(modifyRequest.Controls) > 0 { - packet.AppendChild(encodeControls(modifyRequest.Controls)) - } - - l.Debug.PrintPacket(packet) - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(modifyRequest) if err != nil { return err } defer l.finishMessage(msgCtx) - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := <-msgCtx.responses - if !ok { - return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return err } - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return err - } - ber.PrintPacket(packet) - } - if packet.Children[1].Tag == ApplicationModifyResponse { err := GetLDAPError(packet) if err != nil { @@ -167,7 +147,5 @@ func (l *Conn) Modify(modifyRequest *ModifyRequest) error { } else { log.Printf("Unexpected Response: %d", packet.Children[1].Tag) } - - l.Debug.Printf("%d: returning", msgCtx.id) return nil } diff --git a/vendor/github.com/mattermost/ldap/passwdmodify.go 
b/vendor/github.com/mattermost/ldap/passwdmodify.go index 06bc21d..135554d 100644 --- a/vendor/github.com/mattermost/ldap/passwdmodify.go +++ b/vendor/github.com/mattermost/ldap/passwdmodify.go @@ -6,10 +6,9 @@ package ldap import ( - "errors" "fmt" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) const ( @@ -36,25 +35,28 @@ type PasswordModifyResult struct { Referral string } -func (r *PasswordModifyRequest) encode() (*ber.Packet, error) { - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation") - request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID")) +func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation") + pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID")) + extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request") passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request") - if r.UserIdentity != "" { - passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity")) + if req.UserIdentity != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity")) } - if r.OldPassword != "" { - passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password")) + if req.OldPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password")) } - if r.NewPassword != "" { - passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password")) + if req.NewPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password")) } - extendedRequestValue.AppendChild(passwordModifyRequestValue) - request.AppendChild(extendedRequestValue) - return request, nil + pkt.AppendChild(extendedRequestValue) + + envelope.AppendChild(pkt) + + return nil } // NewPasswordModifyRequest creates a new PasswordModifyRequest @@ -84,46 +86,18 @@ func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPasswo // PasswordModify performs the modification request func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - - encodedPasswordModifyRequest, err := passwordModifyRequest.encode() - if err != nil { - return nil, err - } - packet.AppendChild(encodedPasswordModifyRequest) - - l.Debug.PrintPacket(packet) - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(passwordModifyRequest) if err != nil { return nil, err } defer l.finishMessage(msgCtx) - result := &PasswordModifyResult{} - - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := 
<-msgCtx.responses - if !ok { - return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + packet, err := l.readPacket(msgCtx) if err != nil { return nil, err } - if packet == nil { - return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message")) - } - - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return nil, err - } - ber.PrintPacket(packet) - } + result := &PasswordModifyResult{} if packet.Children[1].Tag == ApplicationExtendedResponse { err := GetLDAPError(packet) diff --git a/vendor/github.com/mattermost/ldap/request.go b/vendor/github.com/mattermost/ldap/request.go new file mode 100644 index 0000000..8c68f34 --- /dev/null +++ b/vendor/github.com/mattermost/ldap/request.go @@ -0,0 +1,66 @@ +package ldap + +import ( + "errors" + + ber "github.com/go-asn1-ber/asn1-ber" +) + +var ( + errRespChanClosed = errors.New("ldap: response channel closed") + errCouldNotRetMsg = errors.New("ldap: could not retrieve message") +) + +type request interface { + appendTo(*ber.Packet) error +} + +type requestFunc func(*ber.Packet) error + +func (f requestFunc) appendTo(p *ber.Packet) error { + return f(p) +} + +func (l *Conn) doRequest(req request) (*messageContext, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + if err := req.appendTo(packet); err != nil { + return nil, err + } + + if l.Debug { + l.Debug.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + l.Debug.Printf("%d: returning", msgCtx.id) + return msgCtx, nil +} + +func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) { + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errRespChanClosed) + } + packet, err := packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errCouldNotRetMsg) + } + + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + l.Debug.PrintPacket(packet) + } + return packet, nil +} diff --git a/vendor/github.com/mattermost/ldap/search.go b/vendor/github.com/mattermost/ldap/search.go index 3aa6dac..186246c 100644 --- a/vendor/github.com/mattermost/ldap/search.go +++ b/vendor/github.com/mattermost/ldap/search.go @@ -61,7 +61,7 @@ import ( "sort" "strings" - "gopkg.in/asn1-ber.v1" + ber "github.com/go-asn1-ber/asn1-ber" ) // scope choices @@ -246,27 +246,33 @@ type SearchRequest struct { Controls []Control } -func (s *SearchRequest) encode() (*ber.Packet, error) { - request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") - request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN")) - request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope")) - request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases")) - request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 
uint64(s.SizeLimit), "Size Limit")) - request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit")) - request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only")) +func (req *SearchRequest) appendTo(envelope *ber.Packet) error { + pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") + pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit")) + pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit")) + pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only")) // compile and encode filter - filterPacket, err := CompileFilter(s.Filter) + filterPacket, err := CompileFilter(req.Filter) if err != nil { - return nil, err + return err } - request.AppendChild(filterPacket) + pkt.AppendChild(filterPacket) // encode attributes attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") - for _, attribute := range s.Attributes { + for _, attribute := range req.Attributes { attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) } - request.AppendChild(attributesPacket) - return request, nil + pkt.AppendChild(attributesPacket) + + envelope.AppendChild(pkt) + if len(req.Controls) > 0 { + envelope.AppendChild(encodeControls(req.Controls)) + } + + return nil } // NewSearchRequest creates a new search request @@ -366,22 +372,7 @@ func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) // Search performs the given search request func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { - packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") - packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) - // encode search request - encodedSearchRequest, err := searchRequest.encode() - if err != nil { - return nil, err - } - packet.AppendChild(encodedSearchRequest) - // encode search controls - if len(searchRequest.Controls) > 0 { - packet.AppendChild(encodeControls(searchRequest.Controls)) - } - - l.Debug.PrintPacket(packet) - - msgCtx, err := l.sendMessage(packet) + msgCtx, err := l.doRequest(searchRequest) if err != nil { return nil, err } @@ -392,26 +383,12 @@ func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { Referrals: make([]string, 0), Controls: make([]Control, 0)} - foundSearchResultDone := false - for !foundSearchResultDone { - l.Debug.Printf("%d: waiting for response", msgCtx.id) - packetResponse, ok := <-msgCtx.responses - if !ok { - return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) - } - packet, err = packetResponse.ReadPacket() - l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + for { + packet, err := l.readPacket(msgCtx) if err != nil { 
return nil, err } - if l.Debug { - if err := addLDAPDescriptions(packet); err != nil { - return nil, err - } - ber.PrintPacket(packet) - } - switch packet.Children[1].Tag { case 4: entry := new(Entry) @@ -440,11 +417,9 @@ func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { result.Controls = append(result.Controls, decodedChild) } } - foundSearchResultDone = true + return result, nil case 19: result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string)) } } - l.Debug.Printf("%d: returning", msgCtx.id) - return result, nil } diff --git a/vendor/github.com/mattermost/logr/.gitignore b/vendor/github.com/mattermost/logr/.gitignore new file mode 100644 index 0000000..c2c0a9e --- /dev/null +++ b/vendor/github.com/mattermost/logr/.gitignore @@ -0,0 +1,36 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +debug +dynip + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Output of profiler +*.prof + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# IntelliJ config +.idea + +# log files +*.log + +# transient directories +vendor +output +build +app +logs + +# test apps +test/cmd/testapp1/testapp1 +test/cmd/simple/simple diff --git a/vendor/github.com/mattermost/logr/.travis.yml b/vendor/github.com/mattermost/logr/.travis.yml new file mode 100644 index 0000000..e6c7caf --- /dev/null +++ b/vendor/github.com/mattermost/logr/.travis.yml @@ -0,0 +1,4 @@ +language: go +sudo: false +go: + - 1.x \ No newline at end of file diff --git a/vendor/github.com/mattermost/logr/LICENSE b/vendor/github.com/mattermost/logr/LICENSE new file mode 100644 index 0000000..3bea678 --- /dev/null +++ b/vendor/github.com/mattermost/logr/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 wiggin77 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattermost/logr/README.md b/vendor/github.com/mattermost/logr/README.md new file mode 100644 index 0000000..a25d6de --- /dev/null +++ b/vendor/github.com/mattermost/logr/README.md @@ -0,0 +1,193 @@ +# logr + +[![GoDoc](https://godoc.org/github.com/mattermost/logr?status.svg)](http://godoc.org/github.com/mattermost/logr) +[![Report Card](https://goreportcard.com/badge/github.com/mattermost/logr)](https://goreportcard.com/report/github.com/mattermost/logr) + +Logr is a fully asynchronous, contextual logger for Go. 
+ +It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but addresses two issues: + +1. Logr is fully asynchronous, meaning that all formatting and writing is done in the background. Latency sensitive applications benefit from not waiting for logging to complete. + +2. Logr provides custom filters which provide more flexibility than Trace, Debug, Info... levels. If you need to temporarily increase verbosity of logging while tracking down a problem you can avoid the fire-hose that typically comes from Debug or Trace by using custom filters. + +## Concepts + + +| entity | description | +| ------ | ----------- |
+| Logr | Engine instance typically instantiated once; used to configure logging.<br/>```lgr := &Logr{}```|
+| Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally or create on demand.<br/>```logger := lgr.NewLogger()```<br/>```logger2 := logger.WithField("user", "Sam")```|
+| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).|
+| Filter | Determines which logging calls get written versus filtered out. Also determines which logging calls generate a stack trace.<br/>```filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Fatal}```|
+| Formatter | Formats the output. Logr includes built-in formatters for JSON and plain text with delimiters. It is easy to create your own formatters or you can also use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).<br/>
    ```formatter := &format.Plain{Delim: " \| "}```| + +## Usage + +```go +// Create Logr instance. +lgr := &logr.Logr{} + +// Create a filter and formatter. Both can be shared by multiple +// targets. +filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error} +formatter := &format.Plain{Delim: " | "} + +// WriterTarget outputs to any io.Writer +t := target.NewWriterTarget(filter, formatter, os.StdOut, 1000) +lgr.AddTarget(t) + +// One or more Loggers can be created, shared, used concurrently, +// or created on demand. +logger := lgr.NewLogger().WithField("user", "Sarah") + +// Now we can log to the target(s). +logger.Debug("login attempt") +logger.Error("login failed") + +// Ensure targets are drained before application exit. +lgr.Shutdown() +``` + +## Fields + +Fields allow for contextual logging, meaning information can be added to log statements without changing the statements themselves. Information can be shared across multiple logging statements thus allowing log analysis tools to group them. + +Fields are added via Loggers: + +```go +lgr := &Logr{} +// ... add targets ... +logger := lgr.NewLogger().WithFields(logr.Fields{ + "user": user, + "role": role}) +logger.Info("login attempt") +// ... later ... +logger.Info("login successful") +``` + +`Logger.WithFields` can be used to create additional Loggers that add more fields. + +Logr fields are inspired by and work the same as [Logrus fields](https://github.com/sirupsen/logrus#fields). + +## Filters + +Logr supports the traditional seven log levels via `logr.StdFilter`: Panic, Fatal, Error, Warning, Info, Debug, and Trace. + +```go +// When added to a target, this filter will only allow +// log statements with level severity Warn or higher. +// It will also generate stack traces for Error or higher. +filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error} +``` + +Logr also supports custom filters (logr.CustomFilter) which allow fine grained inclusion of log items without turning on the fire-hose. + +```go + // create custom levels; use IDs > 10. + LoginLevel := logr.Level{ID: 100, Name: "login ", Stacktrace: false} + LogoutLevel := logr.Level{ID: 101, Name: "logout", Stacktrace: false} + + lgr := &logr.Logr{} + + // create a custom filter with custom levels. + filter := &logr.CustomFilter{} + filter.Add(LoginLevel, LogoutLevel) + + formatter := &format.Plain{Delim: " | "} + tgr := target.NewWriterTarget(filter, formatter, os.StdOut, 1000) + lgr.AddTarget(tgr) + logger := lgr.NewLogger().WithFields(logr.Fields{"user": "Bob", "role": "admin"}) + + logger.Log(LoginLevel, "this item will get logged") + logger.Debug("won't be logged since Debug wasn't added to custom filter") +``` + +Both filter types allow you to determine which levels require a stack trace to be output. Note that generating stack traces cannot happen fully asynchronously and thus add latency to the calling goroutine. + +## Targets + +There are built-in targets for outputting to syslog, file, or any `io.Writer`. More will be added. + +You can use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr). + +You can create your own target by implementing the [Target](./target.go) interface. + +An easier method is to use the [logr.Basic](./target.go) type target and build your functionality on that. Basic handles all the queuing and other plumbing so you only need to implement two methods. 
Example target that outputs to `io.Writer`: + +```go +type Writer struct { + logr.Basic + out io.Writer +} + +func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer { + w := &Writer{out: out} + w.Basic.Start(w, w, filter, formatter, maxQueue) + return w +} + +// Write will always be called by a single goroutine, so no locking needed. +// Just convert a log record to a []byte using the formatter and output the +// bytes to your sink. +func (w *Writer) Write(rec *logr.LogRec) error { + _, stacktrace := w.IsLevelEnabled(rec.Level()) + + // take a buffer from the pool to avoid allocations or just allocate a new one. + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := w.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + _, err = w.out.Write(buf.Bytes()) + return err +} +``` + +## Formatters + +Logr has two built-in formatters, one for JSON and the other plain, delimited text. + +You can use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr). + +You can create your own formatter by implementing the [Formatter](./formatter.go) interface: + +```go +Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) +``` + +## Handlers + +When creating the Logr instance, you can add several handlers that get called when exceptional events occur: + +### ```Logr.OnLoggerError(err error)``` + +Called any time an internal logging error occurs. For example, this can happen when a target cannot connect to its data sink. + +It may be tempting to log this error, however there is a danger that logging this will simply generate another error and so on. If you must log it, use a target and custom level specifically for this event and ensure it cannot generate more errors. + +### ```Logr.OnQueueFull func(rec *LogRec, maxQueueSize int) bool``` + +Called on an attempt to add a log record to a full Logr queue. This generally means the Logr maximum queue size is too small, or at least one target is very slow. Logr maximum queue size can be changed before adding any targets via: + +```go +lgr := logr.Logr{MaxQueueSize: 10000} +``` + +Returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block. + +### ```Logr.OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool``` + +Called on an attempt to add a log record to a full target queue. This generally means your target's max queue size is too small, or the target is very slow to output. + +As with the Logr queue, returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block. + +### ```Logr.OnExit func(code int) and Logr.OnPanic func(err interface{})``` + +OnExit and OnPanic are called when the Logger.FatalXXX and Logger.PanicXXX functions are called respectively. + +In both cases the default behavior is to shut down gracefully, draining all targets, and calling `os.Exit` or `panic` respectively. + +When adding your own handlers, be sure to call `Logr.Shutdown` before exiting the application to avoid losing log records. 
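Putting these handlers together, here is a minimal sketch of wiring them into the setup from the Usage section. It is illustrative only: it assumes `MaxQueueSize`, `OnQueueFull` and `OnTargetQueueFull` are plain fields on `logr.Logr` with the signatures listed in the Handlers section, and that the `format` and `target` helpers live at the import paths implied by the earlier examples.

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattermost/logr"
	"github.com/mattermost/logr/format"
	"github.com/mattermost/logr/target"
)

func main() {
	// Assumed field names; see the Handlers section above for the signatures.
	lgr := &logr.Logr{MaxQueueSize: 10000}

	// Drop the record instead of blocking when the Logr queue is full.
	lgr.OnQueueFull = func(rec *logr.LogRec, maxQueueSize int) bool {
		fmt.Fprintf(os.Stderr, "logr queue full (max %d), dropping record\n", maxQueueSize)
		return true // true = drop
	}

	// Keep the default blocking behaviour when an individual target queue is full.
	lgr.OnTargetQueueFull = func(tgt logr.Target, rec *logr.LogRec, maxQueueSize int) bool {
		return false // false = block until the record can be queued
	}

	filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error}
	formatter := &format.Plain{Delim: " | "}
	lgr.AddTarget(target.NewWriterTarget(filter, formatter, os.Stdout, 1000))

	logger := lgr.NewLogger().WithField("user", "Sarah")
	logger.Error("login failed")

	// Drain all targets before the application exits.
	lgr.Shutdown()
}
```

Returning `true` from `OnQueueFull` favours bounded latency over completeness; returning `false` keeps every record at the cost of blocking the calling goroutine.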
diff --git a/vendor/github.com/mattermost/logr/config.go b/vendor/github.com/mattermost/logr/config.go new file mode 100644 index 0000000..83d4b0c --- /dev/null +++ b/vendor/github.com/mattermost/logr/config.go @@ -0,0 +1,11 @@ +package logr + +import ( + "fmt" + + "github.com/wiggin77/cfg" +) + +func ConfigLogger(config *cfg.Config) error { + return fmt.Errorf("Not implemented yet") +} diff --git a/vendor/github.com/mattermost/logr/const.go b/vendor/github.com/mattermost/logr/const.go new file mode 100644 index 0000000..704d050 --- /dev/null +++ b/vendor/github.com/mattermost/logr/const.go @@ -0,0 +1,34 @@ +package logr + +import "time" + +// Defaults. +const ( + // DefaultMaxQueueSize is the default maximum queue size for Logr instances. + DefaultMaxQueueSize = 1000 + + // DefaultMaxStackFrames is the default maximum max number of stack frames collected + // when generating stack traces for logging. + DefaultMaxStackFrames = 30 + + // MaxLevelID is the maximum value of a level ID. Some level cache implementations will + // allocate a cache of this size. Cannot exceed uint. + MaxLevelID = 256 + + // DefaultEnqueueTimeout is the default amount of time a log record can take to be queued. + // This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called + // and returns false. + DefaultEnqueueTimeout = time.Second * 30 + + // DefaultShutdownTimeout is the default amount of time `logr.Shutdown` can execute before + // timing out. + DefaultShutdownTimeout = time.Second * 30 + + // DefaultFlushTimeout is the default amount of time `logr.Flush` can execute before + // timing out. + DefaultFlushTimeout = time.Second * 30 + + // DefaultMaxPooledBuffer is the maximum size a pooled buffer can be. + // Buffers that grow beyond this size are garbage collected. + DefaultMaxPooledBuffer = 1024 * 1024 +) diff --git a/vendor/github.com/mattermost/logr/filter.go b/vendor/github.com/mattermost/logr/filter.go new file mode 100644 index 0000000..6e654cd --- /dev/null +++ b/vendor/github.com/mattermost/logr/filter.go @@ -0,0 +1,26 @@ +package logr + +// LevelID is the unique id of each level. +type LevelID uint + +// Level provides a mechanism to enable/disable specific log lines. +type Level struct { + ID LevelID + Name string + Stacktrace bool +} + +// String returns the name of this level. +func (level Level) String() string { + return level.Name +} + +// Filter allows targets to determine which Level(s) are active +// for logging and which Level(s) require a stack trace to be output. +// A default implementation using "panic, fatal..." is provided, and +// a more flexible alternative implementation is also provided that +// allows any number of custom levels. +type Filter interface { + IsEnabled(Level) bool + IsStacktraceEnabled(Level) bool +} diff --git a/vendor/github.com/mattermost/logr/format/json.go b/vendor/github.com/mattermost/logr/format/json.go new file mode 100644 index 0000000..8f56c6c --- /dev/null +++ b/vendor/github.com/mattermost/logr/format/json.go @@ -0,0 +1,273 @@ +package format + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "sync" + "time" + + "github.com/francoispqt/gojay" + "github.com/mattermost/logr" +) + +// ContextField is a name/value pair within the context fields. +type ContextField struct { + Key string + Val interface{} +} + +// JSON formats log records as JSON. +type JSON struct { + // DisableTimestamp disables output of timestamp field. + DisableTimestamp bool + // DisableLevel disables output of level field. 
+ DisableLevel bool + // DisableMsg disables output of msg field. + DisableMsg bool + // DisableContext disables output of all context fields. + DisableContext bool + // DisableStacktrace disables output of stack trace. + DisableStacktrace bool + + // TimestampFormat is an optional format for timestamps. If empty + // then DefTimestampFormat is used. + TimestampFormat string + + // Deprecated: this has no effect. + Indent string + + // EscapeHTML determines if certain characters (e.g. `<`, `>`, `&`) + // are escaped. + EscapeHTML bool + + // KeyTimestamp overrides the timestamp field key name. + KeyTimestamp string + + // KeyLevel overrides the level field key name. + KeyLevel string + + // KeyMsg overrides the msg field key name. + KeyMsg string + + // KeyContextFields when not empty will group all context fields + // under this key. + KeyContextFields string + + // KeyStacktrace overrides the stacktrace field key name. + KeyStacktrace string + + // ContextSorter allows custom sorting for the context fields. + ContextSorter func(fields logr.Fields) []ContextField + + once sync.Once +} + +// Format converts a log record to bytes in JSON format. +func (j *JSON) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { + j.once.Do(j.applyDefaultKeyNames) + + if buf == nil { + buf = &bytes.Buffer{} + } + enc := gojay.BorrowEncoder(buf) + defer func() { + enc.Release() + }() + + sorter := j.ContextSorter + if sorter == nil { + sorter = j.defaultContextSorter + } + + jlr := JSONLogRec{ + LogRec: rec, + JSON: j, + stacktrace: stacktrace, + sorter: sorter, + } + + err := enc.EncodeObject(jlr) + if err != nil { + return nil, err + } + buf.WriteByte('\n') + return buf, nil +} + +func (j *JSON) applyDefaultKeyNames() { + if j.KeyTimestamp == "" { + j.KeyTimestamp = "timestamp" + } + if j.KeyLevel == "" { + j.KeyLevel = "level" + } + if j.KeyMsg == "" { + j.KeyMsg = "msg" + } + if j.KeyStacktrace == "" { + j.KeyStacktrace = "stacktrace" + } +} + +// defaultContextSorter sorts the context fields alphabetically by key. +func (j *JSON) defaultContextSorter(fields logr.Fields) []ContextField { + keys := make([]string, 0, len(fields)) + for k := range fields { + keys = append(keys, k) + } + sort.Strings(keys) + + cf := make([]ContextField, 0, len(keys)) + for _, k := range keys { + cf = append(cf, ContextField{Key: k, Val: fields[k]}) + } + return cf +} + +// JSONLogRec decorates a LogRec adding JSON encoding. +type JSONLogRec struct { + *logr.LogRec + *JSON + stacktrace bool + sorter func(fields logr.Fields) []ContextField +} + +// MarshalJSONObject encodes the LogRec as JSON. 
+func (rec JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) { + if !rec.DisableTimestamp { + timestampFmt := rec.TimestampFormat + if timestampFmt == "" { + timestampFmt = logr.DefTimestampFormat + } + time := rec.Time() + enc.AddTimeKey(rec.KeyTimestamp, &time, timestampFmt) + } + if !rec.DisableLevel { + enc.AddStringKey(rec.KeyLevel, rec.Level().Name) + } + if !rec.DisableMsg { + enc.AddStringKey(rec.KeyMsg, rec.Msg()) + } + if !rec.DisableContext { + ctxFields := rec.sorter(rec.Fields()) + if rec.KeyContextFields != "" { + enc.AddObjectKey(rec.KeyContextFields, jsonFields(ctxFields)) + } else { + if len(ctxFields) > 0 { + for _, cf := range ctxFields { + key := rec.prefixCollision(cf.Key) + encodeField(enc, key, cf.Val) + } + } + } + } + if rec.stacktrace && !rec.DisableStacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + enc.AddArrayKey(rec.KeyStacktrace, stackFrames(frames)) + } + } + +} + +// IsNil returns true if the LogRec pointer is nil. +func (rec JSONLogRec) IsNil() bool { + return rec.LogRec == nil +} + +func (rec JSONLogRec) prefixCollision(key string) string { + switch key { + case rec.KeyTimestamp, rec.KeyLevel, rec.KeyMsg, rec.KeyStacktrace: + return rec.prefixCollision("_" + key) + } + return key +} + +type stackFrames []runtime.Frame + +// MarshalJSONArray encodes stackFrames slice as JSON. +func (s stackFrames) MarshalJSONArray(enc *gojay.Encoder) { + for _, frame := range s { + enc.AddObject(stackFrame(frame)) + } +} + +// IsNil returns true if stackFrames is empty slice. +func (s stackFrames) IsNil() bool { + return len(s) == 0 +} + +type stackFrame runtime.Frame + +// MarshalJSONArray encodes stackFrame as JSON. +func (f stackFrame) MarshalJSONObject(enc *gojay.Encoder) { + enc.AddStringKey("Function", f.Function) + enc.AddStringKey("File", f.File) + enc.AddIntKey("Line", f.Line) +} + +func (f stackFrame) IsNil() bool { + return false +} + +type jsonFields []ContextField + +// MarshalJSONObject encodes Fields map to JSON. +func (f jsonFields) MarshalJSONObject(enc *gojay.Encoder) { + for _, ctxField := range f { + encodeField(enc, ctxField.Key, ctxField.Val) + } +} + +// IsNil returns true if map is nil. 
+func (f jsonFields) IsNil() bool { + return f == nil +} + +func encodeField(enc *gojay.Encoder, key string, val interface{}) { + switch vt := val.(type) { + case gojay.MarshalerJSONObject: + enc.AddObjectKey(key, vt) + case gojay.MarshalerJSONArray: + enc.AddArrayKey(key, vt) + case string: + enc.AddStringKey(key, vt) + case error: + enc.AddStringKey(key, vt.Error()) + case bool: + enc.AddBoolKey(key, vt) + case int: + enc.AddIntKey(key, vt) + case int64: + enc.AddInt64Key(key, vt) + case int32: + enc.AddIntKey(key, int(vt)) + case int16: + enc.AddIntKey(key, int(vt)) + case int8: + enc.AddIntKey(key, int(vt)) + case uint64: + enc.AddIntKey(key, int(vt)) + case uint32: + enc.AddIntKey(key, int(vt)) + case uint16: + enc.AddIntKey(key, int(vt)) + case uint8: + enc.AddIntKey(key, int(vt)) + case float64: + enc.AddFloatKey(key, vt) + case float32: + enc.AddFloat32Key(key, vt) + case *gojay.EmbeddedJSON: + enc.AddEmbeddedJSONKey(key, vt) + case time.Time: + enc.AddTimeKey(key, &vt, logr.DefTimestampFormat) + case *time.Time: + enc.AddTimeKey(key, vt, logr.DefTimestampFormat) + default: + s := fmt.Sprintf("%v", vt) + enc.AddStringKey(key, s) + } +} diff --git a/vendor/github.com/mattermost/logr/format/plain.go b/vendor/github.com/mattermost/logr/format/plain.go new file mode 100644 index 0000000..3fa92b4 --- /dev/null +++ b/vendor/github.com/mattermost/logr/format/plain.go @@ -0,0 +1,75 @@ +package format + +import ( + "bytes" + "fmt" + + "github.com/mattermost/logr" +) + +// Plain is the simplest formatter, outputting only text with +// no colors. +type Plain struct { + // DisableTimestamp disables output of timestamp field. + DisableTimestamp bool + // DisableLevel disables output of level field. + DisableLevel bool + // DisableMsg disables output of msg field. + DisableMsg bool + // DisableContext disables output of all context fields. + DisableContext bool + // DisableStacktrace disables output of stack trace. + DisableStacktrace bool + + // Delim is an optional delimiter output between each log field. + // Defaults to a single space. + Delim string + + // TimestampFormat is an optional format for timestamps. If empty + // then DefTimestampFormat is used. + TimestampFormat string +} + +// Format converts a log record to bytes. 
+func (p *Plain) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { + delim := p.Delim + if delim == "" { + delim = " " + } + if buf == nil { + buf = &bytes.Buffer{} + } + + timestampFmt := p.TimestampFormat + if timestampFmt == "" { + timestampFmt = logr.DefTimestampFormat + } + + if !p.DisableTimestamp { + var arr [128]byte + tbuf := rec.Time().AppendFormat(arr[:0], timestampFmt) + buf.Write(tbuf) + buf.WriteString(delim) + } + if !p.DisableLevel { + fmt.Fprintf(buf, "%v%s", rec.Level().Name, delim) + } + if !p.DisableMsg { + fmt.Fprint(buf, rec.Msg(), delim) + } + if !p.DisableContext { + ctx := rec.Fields() + if len(ctx) > 0 { + logr.WriteFields(buf, ctx, " ") + } + } + if stacktrace && !p.DisableStacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + buf.WriteString("\n") + logr.WriteStacktrace(buf, rec.StackFrames()) + } + } + buf.WriteString("\n") + return buf, nil +} diff --git a/vendor/github.com/mattermost/logr/formatter.go b/vendor/github.com/mattermost/logr/formatter.go new file mode 100644 index 0000000..bb8df2d --- /dev/null +++ b/vendor/github.com/mattermost/logr/formatter.go @@ -0,0 +1,119 @@ +package logr + +import ( + "bytes" + "fmt" + "io" + "runtime" + "sort" +) + +// Formatter turns a LogRec into a formatted string. +type Formatter interface { + // Format converts a log record to bytes. If buf is not nil then it will be + // be filled with the formatted results, otherwise a new buffer will be allocated. + Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) +} + +const ( + // DefTimestampFormat is the default time stamp format used by + // Plain formatter and others. + DefTimestampFormat = "2006-01-02 15:04:05.000 Z07:00" +) + +// DefaultFormatter is the default formatter, outputting only text with +// no colors and a space delimiter. Use `format.Plain` instead. +type DefaultFormatter struct { +} + +// Format converts a log record to bytes. +func (p *DefaultFormatter) Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) { + if buf == nil { + buf = &bytes.Buffer{} + } + delim := " " + timestampFmt := DefTimestampFormat + + fmt.Fprintf(buf, "%s%s", rec.Time().Format(timestampFmt), delim) + fmt.Fprintf(buf, "%v%s", rec.Level(), delim) + fmt.Fprint(buf, rec.Msg(), delim) + + ctx := rec.Fields() + if len(ctx) > 0 { + WriteFields(buf, ctx, " ") + } + + if stacktrace { + frames := rec.StackFrames() + if len(frames) > 0 { + buf.WriteString("\n") + WriteStacktrace(buf, rec.StackFrames()) + } + } + buf.WriteString("\n") + + return buf, nil +} + +// WriteFields writes zero or more name value pairs to the io.Writer. +// The pairs are sorted by key name and output in key=value format +// with optional separator between fields. 
+func WriteFields(w io.Writer, flds Fields, separator string) { + keys := make([]string, 0, len(flds)) + for k := range flds { + keys = append(keys, k) + } + sort.Strings(keys) + sep := "" + for _, key := range keys { + writeField(w, key, flds[key], sep) + sep = separator + } +} + +func writeField(w io.Writer, key string, val interface{}, sep string) { + var template string + switch v := val.(type) { + case error: + val := v.Error() + if shouldQuote(val) { + template = "%s%s=%q" + } else { + template = "%s%s=%s" + } + case string: + if shouldQuote(v) { + template = "%s%s=%q" + } else { + template = "%s%s=%s" + } + default: + template = "%s%s=%v" + } + fmt.Fprintf(w, template, sep, key, val) +} + +// shouldQuote returns true if val contains any characters that might be unsafe +// when injecting log output into an aggregator, viewer or report. +func shouldQuote(val string) bool { + for _, c := range val { + if !((c >= '0' && c <= '9') || + (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z')) { + return true + } + } + return false +} + +// WriteStacktrace formats and outputs a stack trace to an io.Writer. +func WriteStacktrace(w io.Writer, frames []runtime.Frame) { + for _, frame := range frames { + if frame.Function != "" { + fmt.Fprintf(w, " %s\n", frame.Function) + } + if frame.File != "" { + fmt.Fprintf(w, " %s:%d\n", frame.File, frame.Line) + } + } +} diff --git a/vendor/github.com/mattermost/logr/go.mod b/vendor/github.com/mattermost/logr/go.mod new file mode 100644 index 0000000..e8e8acf --- /dev/null +++ b/vendor/github.com/mattermost/logr/go.mod @@ -0,0 +1,11 @@ +module github.com/mattermost/logr + +go 1.12 + +require ( + github.com/francoispqt/gojay v1.2.13 + github.com/stretchr/testify v1.2.2 + github.com/wiggin77/cfg v1.0.2 + github.com/wiggin77/merror v1.0.2 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 +) diff --git a/vendor/github.com/mattermost/logr/go.sum b/vendor/github.com/mattermost/logr/go.sum new file mode 100644 index 0000000..ea68851 --- /dev/null +++ b/vendor/github.com/mattermost/logr/go.sum @@ -0,0 +1,174 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= 
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror 
v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wiggin77/cfg v1.0.2 h1:NBUX+iJRr+RTncTqTNvajHwzduqbhCQjEqxLHr6Fk7A= +github.com/wiggin77/cfg v1.0.2/go.mod h1:b3gotba2e5bXTqTW48DwIFoLc+4lWKP7WPi/CdvZ4aE= +github.com/wiggin77/merror v1.0.2 h1:V0nH9eFp64ASyaXC+pB5WpvBoCg7NUwvaCSKdzlcHqw= +github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine 
v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/mattermost/logr/levelcache.go b/vendor/github.com/mattermost/logr/levelcache.go new file mode 100644 index 0000000..2cefb61 --- /dev/null +++ b/vendor/github.com/mattermost/logr/levelcache.go @@ -0,0 +1,98 @@ +package logr + +import ( + "fmt" + "sync" +) + +// LevelStatus represents whether a level is enabled and +// requires a stack trace. +type LevelStatus struct { + Enabled bool + Stacktrace bool + empty bool +} + +type levelCache interface { + setup() + get(id LevelID) (LevelStatus, bool) + put(id LevelID, status LevelStatus) error + clear() +} + +// syncMapLevelCache uses sync.Map which may better handle large concurrency +// scenarios. 
+type syncMapLevelCache struct { + m sync.Map +} + +func (c *syncMapLevelCache) setup() { + c.clear() +} + +func (c *syncMapLevelCache) get(id LevelID) (LevelStatus, bool) { + if id > MaxLevelID { + return LevelStatus{}, false + } + s, _ := c.m.Load(id) + status := s.(LevelStatus) + return status, !status.empty +} + +func (c *syncMapLevelCache) put(id LevelID, status LevelStatus) error { + if id > MaxLevelID { + return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID) + } + c.m.Store(id, status) + return nil +} + +func (c *syncMapLevelCache) clear() { + var i LevelID + for i = 0; i < MaxLevelID; i++ { + c.m.Store(i, LevelStatus{empty: true}) + } +} + +// arrayLevelCache using array and a mutex. +type arrayLevelCache struct { + arr [MaxLevelID + 1]LevelStatus + mux sync.RWMutex +} + +func (c *arrayLevelCache) setup() { + c.clear() +} + +//var dummy = LevelStatus{} + +func (c *arrayLevelCache) get(id LevelID) (LevelStatus, bool) { + if id > MaxLevelID { + return LevelStatus{}, false + } + c.mux.RLock() + status := c.arr[id] + ok := !status.empty + c.mux.RUnlock() + return status, ok +} + +func (c *arrayLevelCache) put(id LevelID, status LevelStatus) error { + if id > MaxLevelID { + return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID) + } + c.mux.Lock() + defer c.mux.Unlock() + + c.arr[id] = status + return nil +} + +func (c *arrayLevelCache) clear() { + c.mux.Lock() + defer c.mux.Unlock() + + for i := range c.arr { + c.arr[i] = LevelStatus{empty: true} + } +} diff --git a/vendor/github.com/mattermost/logr/levelcustom.go b/vendor/github.com/mattermost/logr/levelcustom.go new file mode 100644 index 0000000..384fe4e --- /dev/null +++ b/vendor/github.com/mattermost/logr/levelcustom.go @@ -0,0 +1,45 @@ +package logr + +import ( + "sync" +) + +// CustomFilter allows targets to enable logging via a list of levels. +type CustomFilter struct { + mux sync.RWMutex + levels map[LevelID]Level +} + +// IsEnabled returns true if the specified Level exists in this list. +func (st *CustomFilter) IsEnabled(level Level) bool { + st.mux.RLock() + defer st.mux.RUnlock() + _, ok := st.levels[level.ID] + return ok +} + +// IsStacktraceEnabled returns true if the specified Level requires a stack trace. +func (st *CustomFilter) IsStacktraceEnabled(level Level) bool { + st.mux.RLock() + defer st.mux.RUnlock() + lvl, ok := st.levels[level.ID] + if ok { + return lvl.Stacktrace + } + return false +} + +// Add adds one or more levels to the list. Adding a level enables logging for +// that level on any targets using this CustomFilter. +func (st *CustomFilter) Add(levels ...Level) { + st.mux.Lock() + defer st.mux.Unlock() + + if st.levels == nil { + st.levels = make(map[LevelID]Level) + } + + for _, s := range levels { + st.levels[s.ID] = s + } +} diff --git a/vendor/github.com/mattermost/logr/levelstd.go b/vendor/github.com/mattermost/logr/levelstd.go new file mode 100644 index 0000000..f5e0fa4 --- /dev/null +++ b/vendor/github.com/mattermost/logr/levelstd.go @@ -0,0 +1,37 @@ +package logr + +// StdFilter allows targets to filter via classic log levels where any level +// beyond a certain verbosity/severity is enabled. +type StdFilter struct { + Lvl Level + Stacktrace Level +} + +// IsEnabled returns true if the specified Level is at or above this verbosity. Also +// determines if a stack trace is required. +func (lt StdFilter) IsEnabled(level Level) bool { + return level.ID <= lt.Lvl.ID +} + +// IsStacktraceEnabled returns true if the specified Level requires a stack trace. 
+func (lt StdFilter) IsStacktraceEnabled(level Level) bool { + return level.ID <= lt.Stacktrace.ID +} + +var ( + // Panic is the highest level of severity. Logs the message and then panics. + Panic = Level{ID: 0, Name: "panic"} + // Fatal designates a catastrophic error. Logs the message and then calls + // `logr.Exit(1)`. + Fatal = Level{ID: 1, Name: "fatal"} + // Error designates a serious but possibly recoverable error. + Error = Level{ID: 2, Name: "error"} + // Warn designates non-critical error. + Warn = Level{ID: 3, Name: "warn"} + // Info designates information regarding application events. + Info = Level{ID: 4, Name: "info"} + // Debug designates verbose information typically used for debugging. + Debug = Level{ID: 5, Name: "debug"} + // Trace designates the highest verbosity of log output. + Trace = Level{ID: 6, Name: "trace"} +) diff --git a/vendor/github.com/mattermost/logr/logger.go b/vendor/github.com/mattermost/logr/logger.go new file mode 100644 index 0000000..c238631 --- /dev/null +++ b/vendor/github.com/mattermost/logr/logger.go @@ -0,0 +1,218 @@ +package logr + +import ( + "fmt" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Logger provides context for logging via fields. +type Logger struct { + logr *Logr + fields Fields +} + +// Logr returns the `Logr` instance that created this `Logger`. +func (logger Logger) Logr() *Logr { + return logger.logr +} + +// WithField creates a new `Logger` with any existing fields +// plus the new one. +func (logger Logger) WithField(key string, value interface{}) Logger { + return logger.WithFields(Fields{key: value}) +} + +// WithFields creates a new `Logger` with any existing fields +// plus the new ones. +func (logger Logger) WithFields(fields Fields) Logger { + l := Logger{logr: logger.logr} + // if parent has no fields then avoid creating a new map. + oldLen := len(logger.fields) + if oldLen == 0 { + l.fields = fields + return l + } + + l.fields = make(Fields, len(fields)+oldLen) + for k, v := range logger.fields { + l.fields[k] = v + } + for k, v := range fields { + l.fields[k] = v + } + return l +} + +// Log checks that the level matches one or more targets, and +// if so, generates a log record that is added to the Logr queue. +// Arguments are handled in the manner of fmt.Print. +func (logger Logger) Log(lvl Level, args ...interface{}) { + status := logger.logr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, "", args, status.Stacktrace) + logger.logr.enqueue(rec) + } +} + +// Trace is a convenience method equivalent to `Log(TraceLevel, args...)`. +func (logger Logger) Trace(args ...interface{}) { + logger.Log(Trace, args...) +} + +// Debug is a convenience method equivalent to `Log(DebugLevel, args...)`. +func (logger Logger) Debug(args ...interface{}) { + logger.Log(Debug, args...) +} + +// Print ensures compatibility with std lib logger. +func (logger Logger) Print(args ...interface{}) { + logger.Info(args...) +} + +// Info is a convenience method equivalent to `Log(InfoLevel, args...)`. +func (logger Logger) Info(args ...interface{}) { + logger.Log(Info, args...) +} + +// Warn is a convenience method equivalent to `Log(WarnLevel, args...)`. +func (logger Logger) Warn(args ...interface{}) { + logger.Log(Warn, args...) +} + +// Error is a convenience method equivalent to `Log(ErrorLevel, args...)`. +func (logger Logger) Error(args ...interface{}) { + logger.Log(Error, args...) 
+} + +// Fatal is a convenience method equivalent to `Log(FatalLevel, args...)` +// followed by a call to os.Exit(1). +func (logger Logger) Fatal(args ...interface{}) { + logger.Log(Fatal, args...) + logger.logr.exit(1) +} + +// Panic is a convenience method equivalent to `Log(PanicLevel, args...)` +// followed by a call to panic(). +func (logger Logger) Panic(args ...interface{}) { + logger.Log(Panic, args...) + panic(fmt.Sprint(args...)) +} + +// +// Printf style +// + +// Logf checks that the level matches one or more targets, and +// if so, generates a log record that is added to the main +// queue (channel). Arguments are handled in the manner of fmt.Printf. +func (logger Logger) Logf(lvl Level, format string, args ...interface{}) { + status := logger.logr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, format, args, status.Stacktrace) + logger.logr.enqueue(rec) + } +} + +// Tracef is a convenience method equivalent to `Logf(TraceLevel, args...)`. +func (logger Logger) Tracef(format string, args ...interface{}) { + logger.Logf(Trace, format, args...) +} + +// Debugf is a convenience method equivalent to `Logf(DebugLevel, args...)`. +func (logger Logger) Debugf(format string, args ...interface{}) { + logger.Logf(Debug, format, args...) +} + +// Infof is a convenience method equivalent to `Logf(InfoLevel, args...)`. +func (logger Logger) Infof(format string, args ...interface{}) { + logger.Logf(Info, format, args...) +} + +// Printf ensures compatibility with std lib logger. +func (logger Logger) Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Warnf is a convenience method equivalent to `Logf(WarnLevel, args...)`. +func (logger Logger) Warnf(format string, args ...interface{}) { + logger.Logf(Warn, format, args...) +} + +// Errorf is a convenience method equivalent to `Logf(ErrorLevel, args...)`. +func (logger Logger) Errorf(format string, args ...interface{}) { + logger.Logf(Error, format, args...) +} + +// Fatalf is a convenience method equivalent to `Logf(FatalLevel, args...)` +// followed by a call to os.Exit(1). +func (logger Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(Fatal, format, args...) + logger.logr.exit(1) +} + +// Panicf is a convenience method equivalent to `Logf(PanicLevel, args...)` +// followed by a call to panic(). +func (logger Logger) Panicf(format string, args ...interface{}) { + logger.Logf(Panic, format, args...) +} + +// +// Println style +// + +// Logln checks that the level matches one or more targets, and +// if so, generates a log record that is added to the main +// queue (channel). Arguments are handled in the manner of fmt.Println. +func (logger Logger) Logln(lvl Level, args ...interface{}) { + status := logger.logr.IsLevelEnabled(lvl) + if status.Enabled { + rec := NewLogRec(lvl, logger, "", args, status.Stacktrace) + rec.newline = true + logger.logr.enqueue(rec) + } +} + +// Traceln is a convenience method equivalent to `Logln(TraceLevel, args...)`. +func (logger Logger) Traceln(args ...interface{}) { + logger.Logln(Trace, args...) +} + +// Debugln is a convenience method equivalent to `Logln(DebugLevel, args...)`. +func (logger Logger) Debugln(args ...interface{}) { + logger.Logln(Debug, args...) +} + +// Infoln is a convenience method equivalent to `Logln(InfoLevel, args...)`. +func (logger Logger) Infoln(args ...interface{}) { + logger.Logln(Info, args...) +} + +// Println ensures compatibility with std lib logger. 
+func (logger Logger) Println(args ...interface{}) { + logger.Infoln(args...) +} + +// Warnln is a convenience method equivalent to `Logln(WarnLevel, args...)`. +func (logger Logger) Warnln(args ...interface{}) { + logger.Logln(Warn, args...) +} + +// Errorln is a convenience method equivalent to `Logln(ErrorLevel, args...)`. +func (logger Logger) Errorln(args ...interface{}) { + logger.Logln(Error, args...) +} + +// Fatalln is a convenience method equivalent to `Logln(FatalLevel, args...)` +// followed by a call to os.Exit(1). +func (logger Logger) Fatalln(args ...interface{}) { + logger.Logln(Fatal, args...) + logger.logr.exit(1) +} + +// Panicln is a convenience method equivalent to `Logln(PanicLevel, args...)` +// followed by a call to panic(). +func (logger Logger) Panicln(args ...interface{}) { + logger.Logln(Panic, args...) +} diff --git a/vendor/github.com/mattermost/logr/logr.go b/vendor/github.com/mattermost/logr/logr.go new file mode 100644 index 0000000..631366a --- /dev/null +++ b/vendor/github.com/mattermost/logr/logr.go @@ -0,0 +1,664 @@ +package logr + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/wiggin77/cfg" + "github.com/wiggin77/merror" +) + +// Logr maintains a list of log targets and accepts incoming +// log records. +type Logr struct { + tmux sync.RWMutex // target mutex + targets []Target + + mux sync.RWMutex + maxQueueSizeActual int + in chan *LogRec + done chan struct{} + once sync.Once + shutdown bool + lvlCache levelCache + + metricsInitOnce sync.Once + metricsCloseOnce sync.Once + metricsDone chan struct{} + metrics MetricsCollector + queueSizeGauge Gauge + loggedCounter Counter + errorCounter Counter + + bufferPool sync.Pool + + // MaxQueueSize is the maximum number of log records that can be queued. + // If exceeded, `OnQueueFull` is called which determines if the log + // record will be dropped or block until add is successful. + // If this is modified, it must be done before `Configure` or + // `AddTarget`. Defaults to DefaultMaxQueueSize. + MaxQueueSize int + + // OnLoggerError, when not nil, is called any time an internal + // logging error occurs. For example, this can happen when a + // target cannot connect to its data sink. + OnLoggerError func(error) + + // OnQueueFull, when not nil, is called on an attempt to add + // a log record to a full Logr queue. + // `MaxQueueSize` can be used to modify the maximum queue size. + // This function should return quickly, with a bool indicating whether + // the log record should be dropped (true) or block until the log record + // is successfully added (false). If nil then blocking (false) is assumed. + OnQueueFull func(rec *LogRec, maxQueueSize int) bool + + // OnTargetQueueFull, when not nil, is called on an attempt to add + // a log record to a full target queue provided the target supports reporting + // this condition. + // This function should return quickly, with a bool indicating whether + // the log record should be dropped (true) or block until the log record + // is successfully added (false). If nil then blocking (false) is assumed. + OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool + + // OnExit, when not nil, is called when a FatalXXX style log API is called. + // When nil, then the default behavior is to cleanly shut down this Logr and + // call `os.Exit(code)`. + OnExit func(code int) + + // OnPanic, when not nil, is called when a PanicXXX style log API is called. 
+ // When nil, then the default behavior is to cleanly shut down this Logr and + // call `panic(err)`. + OnPanic func(err interface{}) + + // EnqueueTimeout is the amount of time a log record can take to be queued. + // This only applies to blocking enqueue which happen after `logr.OnQueueFull` + // is called and returns false. + EnqueueTimeout time.Duration + + // ShutdownTimeout is the amount of time `logr.Shutdown` can execute before + // timing out. + ShutdownTimeout time.Duration + + // FlushTimeout is the amount of time `logr.Flush` can execute before + // timing out. + FlushTimeout time.Duration + + // UseSyncMapLevelCache can be set to true before the first target is added + // when high concurrency (e.g. >32 cores) is expected. This may improve + // performance with large numbers of cores - benchmark for your use case. + UseSyncMapLevelCache bool + + // MaxPooledFormatBuffer determines the maximum size of a buffer that can be + // pooled. To reduce allocations, the buffers needed during formatting (etc) + // are pooled. A very large log item will grow a buffer that could stay in + // memory indefinitely. This settings lets you control how big a pooled buffer + // can be - anything larger will be garbage collected after use. + // Defaults to 1MB. + MaxPooledBuffer int + + // DisableBufferPool when true disables the buffer pool. See MaxPooledBuffer. + DisableBufferPool bool + + // MetricsUpdateFreqMillis determines how often polled metrics are updated + // when metrics are enabled. + MetricsUpdateFreqMillis int64 +} + +// Configure adds/removes targets via the supplied `Config`. +func (logr *Logr) Configure(config *cfg.Config) error { + // TODO + return fmt.Errorf("not implemented yet") +} + +func (logr *Logr) ensureInit() { + logr.once.Do(func() { + defer func() { + go logr.start() + }() + + logr.mux.Lock() + defer logr.mux.Unlock() + + logr.maxQueueSizeActual = logr.MaxQueueSize + if logr.maxQueueSizeActual == 0 { + logr.maxQueueSizeActual = DefaultMaxQueueSize + } + + if logr.maxQueueSizeActual < 0 { + logr.maxQueueSizeActual = 0 + } + + logr.in = make(chan *LogRec, logr.maxQueueSizeActual) + logr.done = make(chan struct{}) + + if logr.UseSyncMapLevelCache { + logr.lvlCache = &syncMapLevelCache{} + } else { + logr.lvlCache = &arrayLevelCache{} + } + + if logr.MaxPooledBuffer == 0 { + logr.MaxPooledBuffer = DefaultMaxPooledBuffer + } + logr.bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + logr.lvlCache.setup() + }) +} + +// AddTarget adds one or more targets to the logger which will receive +// log records for outputting. +func (logr *Logr) AddTarget(targets ...Target) error { + if logr.IsShutdown() { + return fmt.Errorf("AddTarget called after Logr shut down") + } + + logr.ensureInit() + metrics := logr.getMetricsCollector() + defer logr.ResetLevelCache() // call this after tmux is released + + logr.tmux.Lock() + defer logr.tmux.Unlock() + + errs := merror.New() + for _, t := range targets { + if t == nil { + continue + } + + logr.targets = append(logr.targets, t) + if metrics != nil { + if tm, ok := t.(TargetWithMetrics); ok { + if err := tm.EnableMetrics(metrics, logr.MetricsUpdateFreqMillis); err != nil { + errs.Append(err) + } + } + } + } + return errs.ErrorOrNil() +} + +// NewLogger creates a Logger using defaults. A `Logger` is light-weight +// enough to create on-demand, but typically one or more Loggers are +// created and re-used. 
+func (logr *Logr) NewLogger() Logger { + logger := Logger{logr: logr} + return logger +} + +var levelStatusDisabled = LevelStatus{} + +// IsLevelEnabled returns true if at least one target has the specified +// level enabled. The result is cached so that subsequent checks are fast. +func (logr *Logr) IsLevelEnabled(lvl Level) LevelStatus { + status, ok := logr.isLevelEnabledFromCache(lvl) + if ok { + return status + } + + // Check each target. + logr.tmux.RLock() + for _, t := range logr.targets { + e, s := t.IsLevelEnabled(lvl) + if e { + status.Enabled = true + if s { + status.Stacktrace = true + break // if both enabled then no sense checking more targets + } + } + } + logr.tmux.RUnlock() + + // Cache and return the result. + if err := logr.updateLevelCache(lvl.ID, status); err != nil { + logr.ReportError(err) + return LevelStatus{} + } + return status +} + +func (logr *Logr) isLevelEnabledFromCache(lvl Level) (LevelStatus, bool) { + logr.mux.RLock() + defer logr.mux.RUnlock() + + // Don't accept new log records after shutdown. + if logr.shutdown { + return levelStatusDisabled, true + } + + // Check cache. lvlCache may still be nil if no targets added. + if logr.lvlCache == nil { + return levelStatusDisabled, true + } + status, ok := logr.lvlCache.get(lvl.ID) + if ok { + return status, true + } + return LevelStatus{}, false +} + +func (logr *Logr) updateLevelCache(id LevelID, status LevelStatus) error { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.lvlCache != nil { + return logr.lvlCache.put(id, status) + } + return nil +} + +// HasTargets returns true only if at least one target exists within the Logr. +func (logr *Logr) HasTargets() bool { + logr.tmux.RLock() + defer logr.tmux.RUnlock() + return len(logr.targets) > 0 +} + +// TargetInfo provides name and type for a Target. +type TargetInfo struct { + Name string + Type string +} + +// TargetInfos enumerates all the targets added to this Logr. +// The resulting slice represents a snapshot at time of calling. +func (logr *Logr) TargetInfos() []TargetInfo { + logr.tmux.RLock() + defer logr.tmux.RUnlock() + + infos := make([]TargetInfo, 0) + + for _, t := range logr.targets { + inf := TargetInfo{ + Name: fmt.Sprintf("%v", t), + Type: fmt.Sprintf("%T", t), + } + infos = append(infos, inf) + } + return infos +} + +// RemoveTargets safely removes one or more targets based on the filtering method. +// f should return true to delete the target, false to keep it. +// When removing a target, best effort is made to write any queued log records before +// closing, with cxt determining how much time can be spent in total. +// Note, keep the timeout short since this method blocks certain logging operations. +func (logr *Logr) RemoveTargets(cxt context.Context, f func(ti TargetInfo) bool) error { + var removed bool + defer func() { + if removed { + // call this after tmux is released since + // it will lock mux and we don't want to + // introduce possible deadlock. + logr.ResetLevelCache() + } + }() + + errs := merror.New() + + logr.tmux.Lock() + defer logr.tmux.Unlock() + + cp := make([]Target, 0) + + for _, t := range logr.targets { + inf := TargetInfo{ + Name: fmt.Sprintf("%v", t), + Type: fmt.Sprintf("%T", t), + } + if f(inf) { + if err := t.Shutdown(cxt); err != nil { + errs.Append(err) + } + removed = true + } else { + cp = append(cp, t) + } + } + logr.targets = cp + return errs.ErrorOrNil() +} + +// ResetLevelCache resets the cached results of `IsLevelEnabled`. 
This is +// called any time a Target is added or a target's level is changed. +func (logr *Logr) ResetLevelCache() { + // Write lock so that new cache entries cannot be stored while we + // clear the cache. + logr.mux.Lock() + defer logr.mux.Unlock() + logr.resetLevelCache() +} + +// resetLevelCache empties the level cache without locking. +// mux.Lock must be held before calling this function. +func (logr *Logr) resetLevelCache() { + // lvlCache may still be nil if no targets added. + if logr.lvlCache != nil { + logr.lvlCache.clear() + } +} + +// enqueue adds a log record to the logr queue. If the queue is full then +// this function either blocks or the log record is dropped, depending on +// the result of calling `OnQueueFull`. +func (logr *Logr) enqueue(rec *LogRec) { + if logr.in == nil { + logr.ReportError(fmt.Errorf("AddTarget or Configure must be called before enqueue")) + } + + select { + case logr.in <- rec: + default: + if logr.OnQueueFull != nil && logr.OnQueueFull(rec, logr.maxQueueSizeActual) { + return // drop the record + } + select { + case <-time.After(logr.enqueueTimeout()): + logr.ReportError(fmt.Errorf("enqueue timed out for log rec [%v]", rec)) + case logr.in <- rec: // block until success or timeout + } + } +} + +// exit is called by one of the FatalXXX style APIS. If `logr.OnExit` is not nil +// then that method is called, otherwise the default behavior is to shut down this +// Logr cleanly then call `os.Exit(code)`. +func (logr *Logr) exit(code int) { + if logr.OnExit != nil { + logr.OnExit(code) + return + } + + if err := logr.Shutdown(); err != nil { + logr.ReportError(err) + } + os.Exit(code) +} + +// panic is called by one of the PanicXXX style APIS. If `logr.OnPanic` is not nil +// then that method is called, otherwise the default behavior is to shut down this +// Logr cleanly then call `panic(err)`. +func (logr *Logr) panic(err interface{}) { + if logr.OnPanic != nil { + logr.OnPanic(err) + return + } + + if err := logr.Shutdown(); err != nil { + logr.ReportError(err) + } + panic(err) +} + +// Flush blocks while flushing the logr queue and all target queues, by +// writing existing log records to valid targets. +// Any attempts to add new log records will block until flush is complete. +// `logr.FlushTimeout` determines how long flush can execute before +// timing out. Use `IsTimeoutError` to determine if the returned error is +// due to a timeout. +func (logr *Logr) Flush() error { + ctx, cancel := context.WithTimeout(context.Background(), logr.flushTimeout()) + defer cancel() + return logr.FlushWithTimeout(ctx) +} + +// Flush blocks while flushing the logr queue and all target queues, by +// writing existing log records to valid targets. +// Any attempts to add new log records will block until flush is complete. +// Use `IsTimeoutError` to determine if the returned error is +// due to a timeout. +func (logr *Logr) FlushWithTimeout(ctx context.Context) error { + if !logr.HasTargets() { + return nil + } + + if logr.IsShutdown() { + return errors.New("Flush called on shut down Logr") + } + + rec := newFlushLogRec(logr.NewLogger()) + logr.enqueue(rec) + + select { + case <-ctx.Done(): + return newTimeoutError("logr queue shutdown timeout") + case <-rec.flush: + } + return nil +} + +// IsShutdown returns true if this Logr instance has been shut down. +// No further log records can be enqueued and no targets added after +// shutdown. 
+func (logr *Logr) IsShutdown() bool { + logr.mux.Lock() + defer logr.mux.Unlock() + return logr.shutdown +} + +// Shutdown cleanly stops the logging engine after making best efforts +// to flush all targets. Call this function right before application +// exit - logr cannot be restarted once shut down. +// `logr.ShutdownTimeout` determines how long shutdown can execute before +// timing out. Use `IsTimeoutError` to determine if the returned error is +// due to a timeout. +func (logr *Logr) Shutdown() error { + ctx, cancel := context.WithTimeout(context.Background(), logr.shutdownTimeout()) + defer cancel() + return logr.ShutdownWithTimeout(ctx) +} + +// Shutdown cleanly stops the logging engine after making best efforts +// to flush all targets. Call this function right before application +// exit - logr cannot be restarted once shut down. +// Use `IsTimeoutError` to determine if the returned error is due to a +// timeout. +func (logr *Logr) ShutdownWithTimeout(ctx context.Context) error { + logr.mux.Lock() + if logr.shutdown { + logr.mux.Unlock() + return errors.New("Shutdown called again after shut down") + } + logr.shutdown = true + logr.resetLevelCache() + logr.mux.Unlock() + + logr.metricsCloseOnce.Do(func() { + if logr.metricsDone != nil { + close(logr.metricsDone) + } + }) + + errs := merror.New() + + // close the incoming channel and wait for read loop to exit. + if logr.in != nil { + close(logr.in) + select { + case <-ctx.Done(): + errs.Append(newTimeoutError("logr queue shutdown timeout")) + case <-logr.done: + } + } + + // logr.in channel should now be drained to targets and no more log records + // can be added. + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, t := range logr.targets { + err := t.Shutdown(ctx) + if err != nil { + errs.Append(err) + } + } + return errs.ErrorOrNil() +} + +// ReportError is used to notify the host application of any internal logging errors. +// If `OnLoggerError` is not nil, it is called with the error, otherwise the error is +// output to `os.Stderr`. +func (logr *Logr) ReportError(err interface{}) { + logr.incErrorCounter() + + if logr.OnLoggerError == nil { + fmt.Fprintln(os.Stderr, err) + return + } + logr.OnLoggerError(fmt.Errorf("%v", err)) +} + +// BorrowBuffer borrows a buffer from the pool. Release the buffer to reduce garbage collection. +func (logr *Logr) BorrowBuffer() *bytes.Buffer { + if logr.DisableBufferPool { + return &bytes.Buffer{} + } + return logr.bufferPool.Get().(*bytes.Buffer) +} + +// ReleaseBuffer returns a buffer to the pool to reduce garbage collection. The buffer is only +// retained if less than MaxPooledBuffer. +func (logr *Logr) ReleaseBuffer(buf *bytes.Buffer) { + if !logr.DisableBufferPool && buf.Cap() < logr.MaxPooledBuffer { + buf.Reset() + logr.bufferPool.Put(buf) + } +} + +// enqueueTimeout returns amount of time a log record can take to be queued. +// This only applies to blocking enqueue which happen after `logr.OnQueueFull` is called +// and returns false. +func (logr *Logr) enqueueTimeout() time.Duration { + if logr.EnqueueTimeout == 0 { + return DefaultEnqueueTimeout + } + return logr.EnqueueTimeout +} + +// shutdownTimeout returns the timeout duration for `logr.Shutdown`. +func (logr *Logr) shutdownTimeout() time.Duration { + if logr.ShutdownTimeout == 0 { + return DefaultShutdownTimeout + } + return logr.ShutdownTimeout +} + +// flushTimeout returns the timeout duration for `logr.Flush`. 
+func (logr *Logr) flushTimeout() time.Duration { + if logr.FlushTimeout == 0 { + return DefaultFlushTimeout + } + return logr.FlushTimeout +} + +// start selects on incoming log records until done channel signals. +// Incoming log records are fanned out to all log targets. +func (logr *Logr) start() { + defer func() { + if r := recover(); r != nil { + logr.ReportError(r) + go logr.start() + } + }() + + for rec := range logr.in { + if rec.flush != nil { + logr.flush(rec.flush) + } else { + rec.prep() + logr.fanout(rec) + } + } + close(logr.done) +} + +// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqSecs` seconds until +// logr is closed. +func (logr *Logr) startMetricsUpdater() { + for { + updateFreq := logr.getMetricsUpdateFreqMillis() + if updateFreq == 0 { + updateFreq = DefMetricsUpdateFreqMillis + } + if updateFreq < 250 { + updateFreq = 250 // don't peg the CPU + } + + select { + case <-logr.metricsDone: + return + case <-time.After(time.Duration(updateFreq) * time.Millisecond): + logr.setQueueSizeGauge(float64(len(logr.in))) + } + } +} + +func (logr *Logr) getMetricsUpdateFreqMillis() int64 { + logr.mux.RLock() + defer logr.mux.RUnlock() + return logr.MetricsUpdateFreqMillis +} + +// fanout pushes a LogRec to all targets. +func (logr *Logr) fanout(rec *LogRec) { + var target Target + defer func() { + if r := recover(); r != nil { + logr.ReportError(fmt.Errorf("fanout failed for target %s, %v", target, r)) + } + }() + + var logged bool + defer func() { + if logged { + logr.incLoggedCounter() // call this after tmux is released + } + }() + + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, target = range logr.targets { + if enabled, _ := target.IsLevelEnabled(rec.Level()); enabled { + target.Log(rec) + logged = true + } + } +} + +// flush drains the queue and notifies when done. +func (logr *Logr) flush(done chan<- struct{}) { + // first drain the logr queue. +loop: + for { + var rec *LogRec + select { + case rec = <-logr.in: + if rec.flush == nil { + rec.prep() + logr.fanout(rec) + } + default: + break loop + } + } + + logger := logr.NewLogger() + + // drain all the targets; block until finished. + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, target := range logr.targets { + rec := newFlushLogRec(logger) + target.Log(rec) + <-rec.flush + } + done <- struct{}{} +} diff --git a/vendor/github.com/mattermost/logr/logrec.go b/vendor/github.com/mattermost/logr/logrec.go new file mode 100644 index 0000000..9428aae --- /dev/null +++ b/vendor/github.com/mattermost/logr/logrec.go @@ -0,0 +1,189 @@ +package logr + +import ( + "fmt" + "runtime" + "strings" + "sync" + "time" +) + +var ( + logrPkg string +) + +func init() { + // Calc current package name + pcs := make([]uintptr, 2) + _ = runtime.Callers(0, pcs) + tmp := runtime.FuncForPC(pcs[1]).Name() + logrPkg = getPackageName(tmp) +} + +// LogRec collects raw, unformatted data to be logged. +// TODO: pool these? how to reliably know when targets are done with them? Copy for each target? +type LogRec struct { + mux sync.RWMutex + time time.Time + + level Level + logger Logger + + template string + newline bool + args []interface{} + + stackPC []uintptr + stackCount int + + // flushes Logr and target queues when not nil. + flush chan struct{} + + // remaining fields calculated by `prep` + msg string + frames []runtime.Frame +} + +// NewLogRec creates a new LogRec with the current time and optional stack trace. 
+func NewLogRec(lvl Level, logger Logger, template string, args []interface{}, incStacktrace bool) *LogRec { + rec := &LogRec{time: time.Now(), logger: logger, level: lvl, template: template, args: args} + if incStacktrace { + rec.stackPC = make([]uintptr, DefaultMaxStackFrames) + rec.stackCount = runtime.Callers(2, rec.stackPC) + } + return rec +} + +// newFlushLogRec creates a LogRec that flushes the Logr queue and +// any target queues that support flushing. +func newFlushLogRec(logger Logger) *LogRec { + return &LogRec{logger: logger, flush: make(chan struct{})} +} + +// prep resolves all args and field values to strings, and +// resolves stack trace to frames. +func (rec *LogRec) prep() { + rec.mux.Lock() + defer rec.mux.Unlock() + + // resolve args + if rec.template == "" { + if rec.newline { + rec.msg = fmt.Sprintln(rec.args...) + } else { + rec.msg = fmt.Sprint(rec.args...) + } + } else { + rec.msg = fmt.Sprintf(rec.template, rec.args...) + } + + // resolve stack trace + if rec.stackCount > 0 { + frames := runtime.CallersFrames(rec.stackPC[:rec.stackCount]) + for { + f, more := frames.Next() + rec.frames = append(rec.frames, f) + if !more { + break + } + } + + // remove leading logr package entries. + var start int + for i, frame := range rec.frames { + pkg := getPackageName(frame.Function) + if pkg != "" && pkg != logrPkg { + start = i + break + } + } + rec.frames = rec.frames[start:] + } +} + +// WithTime returns a shallow copy of the log record while replacing +// the time. This can be used by targets and formatters to adjust +// the time, or take ownership of the log record. +func (rec *LogRec) WithTime(time time.Time) *LogRec { + rec.mux.RLock() + defer rec.mux.RUnlock() + + return &LogRec{ + time: time, + level: rec.level, + logger: rec.logger, + template: rec.template, + newline: rec.newline, + args: rec.args, + msg: rec.msg, + stackPC: rec.stackPC, + stackCount: rec.stackCount, + frames: rec.frames, + } +} + +// Logger returns the `Logger` that created this `LogRec`. +func (rec *LogRec) Logger() Logger { + return rec.logger +} + +// Time returns this log record's time stamp. +func (rec *LogRec) Time() time.Time { + // no locking needed as this field is not mutated. + return rec.time +} + +// Level returns this log record's Level. +func (rec *LogRec) Level() Level { + // no locking needed as this field is not mutated. + return rec.level +} + +// Fields returns this log record's Fields. +func (rec *LogRec) Fields() Fields { + // no locking needed as this field is not mutated. + return rec.logger.fields +} + +// Msg returns this log record's message text. +func (rec *LogRec) Msg() string { + rec.mux.RLock() + defer rec.mux.RUnlock() + return rec.msg +} + +// StackFrames returns this log record's stack frames or +// nil if no stack trace was required. +func (rec *LogRec) StackFrames() []runtime.Frame { + rec.mux.RLock() + defer rec.mux.RUnlock() + return rec.frames +} + +// String returns a string representation of this log record. 
+func (rec *LogRec) String() string { + if rec.flush != nil { + return "[flusher]" + } + + f := &DefaultFormatter{} + buf := rec.logger.logr.BorrowBuffer() + defer rec.logger.logr.ReleaseBuffer(buf) + buf, _ = f.Format(rec, true, buf) + return strings.TrimSpace(buf.String()) +} + +// getPackageName reduces a fully qualified function name to the package name +// By sirupsen: https://github.com/sirupsen/logrus/blob/master/entry.go +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + return f +} diff --git a/vendor/github.com/mattermost/logr/metrics.go b/vendor/github.com/mattermost/logr/metrics.go new file mode 100644 index 0000000..24fe22b --- /dev/null +++ b/vendor/github.com/mattermost/logr/metrics.go @@ -0,0 +1,117 @@ +package logr + +import ( + "errors" + + "github.com/wiggin77/merror" +) + +const ( + DefMetricsUpdateFreqMillis = 15000 // 15 seconds +) + +// Counter is a simple metrics sink that can only increment a value. +// Implementations are external to Logr and provided via `MetricsCollector`. +type Counter interface { + // Inc increments the counter by 1. Use Add to increment it by arbitrary non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < 0. + Add(float64) +} + +// Gauge is a simple metrics sink that can receive values and increase or decrease. +// Implementations are external to Logr and provided via `MetricsCollector`. +type Gauge interface { + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Add adds the given value to the Gauge. (The value can be negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// MetricsCollector provides a way for users of this Logr package to have metrics pushed +// in an efficient way to any backend, e.g. Prometheus. +// For each target added to Logr, the supplied MetricsCollector will provide a Gauge +// and Counters that will be called frequently as logging occurs. +type MetricsCollector interface { + // QueueSizeGauge returns a Gauge that will be updated by the named target. + QueueSizeGauge(target string) (Gauge, error) + // LoggedCounter returns a Counter that will be incremented by the named target. + LoggedCounter(target string) (Counter, error) + // ErrorCounter returns a Counter that will be incremented by the named target. + ErrorCounter(target string) (Counter, error) + // DroppedCounter returns a Counter that will be incremented by the named target. + DroppedCounter(target string) (Counter, error) + // BlockedCounter returns a Counter that will be incremented by the named target. + BlockedCounter(target string) (Counter, error) +} + +// TargetWithMetrics is a target that provides metrics. +type TargetWithMetrics interface { + EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error +} + +func (logr *Logr) getMetricsCollector() MetricsCollector { + logr.mux.RLock() + defer logr.mux.RUnlock() + return logr.metrics +} + +// SetMetricsCollector enables metrics collection by supplying a MetricsCollector. +// The MetricsCollector provides counters and gauges that are updated by log targets. 
+func (logr *Logr) SetMetricsCollector(collector MetricsCollector) error { + if collector == nil { + return errors.New("collector cannot be nil") + } + + logr.mux.Lock() + logr.metrics = collector + logr.queueSizeGauge, _ = collector.QueueSizeGauge("_logr") + logr.loggedCounter, _ = collector.LoggedCounter("_logr") + logr.errorCounter, _ = collector.ErrorCounter("_logr") + logr.mux.Unlock() + + logr.metricsInitOnce.Do(func() { + logr.metricsDone = make(chan struct{}) + go logr.startMetricsUpdater() + }) + + merr := merror.New() + + logr.tmux.RLock() + defer logr.tmux.RUnlock() + for _, target := range logr.targets { + if tm, ok := target.(TargetWithMetrics); ok { + if err := tm.EnableMetrics(collector, logr.MetricsUpdateFreqMillis); err != nil { + merr.Append(err) + } + } + + } + return merr.ErrorOrNil() +} + +func (logr *Logr) setQueueSizeGauge(val float64) { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.queueSizeGauge != nil { + logr.queueSizeGauge.Set(val) + } +} + +func (logr *Logr) incLoggedCounter() { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.loggedCounter != nil { + logr.loggedCounter.Inc() + } +} + +func (logr *Logr) incErrorCounter() { + logr.mux.RLock() + defer logr.mux.RUnlock() + if logr.errorCounter != nil { + logr.errorCounter.Inc() + } +} diff --git a/vendor/github.com/mattermost/logr/target.go b/vendor/github.com/mattermost/logr/target.go new file mode 100644 index 0000000..f8e7bf7 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target.go @@ -0,0 +1,299 @@ +package logr + +import ( + "context" + "fmt" + "os" + "sync" + "time" +) + +// Target represents a destination for log records such as file, +// database, TCP socket, etc. +type Target interface { + // SetName provides an optional name for the target. + SetName(name string) + + // IsLevelEnabled returns true if this target should emit + // logs for the specified level. Also determines if + // a stack trace is required. + IsLevelEnabled(Level) (enabled bool, stacktrace bool) + + // Formatter returns the Formatter associated with this Target. + Formatter() Formatter + + // Log outputs the log record to this target's destination. + Log(rec *LogRec) + + // Shutdown makes best effort to flush target queue and + // frees/closes all resources. + Shutdown(ctx context.Context) error +} + +// RecordWriter can convert a LogRecord to bytes and output to some data sink. +type RecordWriter interface { + Write(rec *LogRec) error +} + +// Basic provides the basic functionality of a Target that can be used +// to more easily compose your own Targets. To use, just embed Basic +// in your target type, implement `RecordWriter`, and call `(*Basic).Start`. +type Basic struct { + target Target + + filter Filter + formatter Formatter + + in chan *LogRec + done chan struct{} + w RecordWriter + + mux sync.RWMutex + name string + + metrics bool + queueSizeGauge Gauge + loggedCounter Counter + errorCounter Counter + droppedCounter Counter + blockedCounter Counter + + metricsUpdateFreqMillis int64 +} + +// Start initializes this target helper and starts accepting log records for processing. 
+func (b *Basic) Start(target Target, rw RecordWriter, filter Filter, formatter Formatter, maxQueued int) { + if filter == nil { + filter = &StdFilter{Lvl: Fatal} + } + if formatter == nil { + formatter = &DefaultFormatter{} + } + + b.target = target + b.filter = filter + b.formatter = formatter + b.in = make(chan *LogRec, maxQueued) + b.done = make(chan struct{}, 1) + b.w = rw + go b.start() + + if b.hasMetrics() { + go b.startMetricsUpdater() + } +} + +func (b *Basic) SetName(name string) { + b.mux.Lock() + defer b.mux.Unlock() + b.name = name +} + +// IsLevelEnabled returns true if this target should emit +// logs for the specified level. Also determines if +// a stack trace is required. +func (b *Basic) IsLevelEnabled(lvl Level) (enabled bool, stacktrace bool) { + return b.filter.IsEnabled(lvl), b.filter.IsStacktraceEnabled(lvl) +} + +// Formatter returns the Formatter associated with this Target. +func (b *Basic) Formatter() Formatter { + return b.formatter +} + +// Shutdown stops processing log records after making best +// effort to flush queue. +func (b *Basic) Shutdown(ctx context.Context) error { + // close the incoming channel and wait for read loop to exit. + close(b.in) + select { + case <-ctx.Done(): + case <-b.done: + } + + // b.in channel should now be drained. + return nil +} + +// Log outputs the log record to this targets destination. +func (b *Basic) Log(rec *LogRec) { + lgr := rec.Logger().Logr() + select { + case b.in <- rec: + default: + handler := lgr.OnTargetQueueFull + if handler != nil && handler(b.target, rec, cap(b.in)) { + b.incDroppedCounter() + return // drop the record + } + b.incBlockedCounter() + + select { + case <-time.After(lgr.enqueueTimeout()): + lgr.ReportError(fmt.Errorf("target enqueue timeout for log rec [%v]", rec)) + case b.in <- rec: // block until success or timeout + } + } +} + +// Metrics enables metrics collection using the provided MetricsCollector. +func (b *Basic) EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error { + name := fmt.Sprintf("%v", b) + + b.mux.Lock() + defer b.mux.Unlock() + + b.metrics = true + b.metricsUpdateFreqMillis = updateFreqMillis + + var err error + + if b.queueSizeGauge, err = collector.QueueSizeGauge(name); err != nil { + return err + } + if b.loggedCounter, err = collector.LoggedCounter(name); err != nil { + return err + } + if b.errorCounter, err = collector.ErrorCounter(name); err != nil { + return err + } + if b.droppedCounter, err = collector.DroppedCounter(name); err != nil { + return err + } + if b.blockedCounter, err = collector.BlockedCounter(name); err != nil { + return err + } + return nil +} + +func (b *Basic) hasMetrics() bool { + b.mux.RLock() + defer b.mux.RUnlock() + return b.metrics +} + +func (b *Basic) setQueueSizeGauge(val float64) { + b.mux.RLock() + defer b.mux.RUnlock() + if b.queueSizeGauge != nil { + b.queueSizeGauge.Set(val) + } +} + +func (b *Basic) incLoggedCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.loggedCounter != nil { + b.loggedCounter.Inc() + } +} + +func (b *Basic) incErrorCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.errorCounter != nil { + b.errorCounter.Inc() + } +} + +func (b *Basic) incDroppedCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.droppedCounter != nil { + b.droppedCounter.Inc() + } +} + +func (b *Basic) incBlockedCounter() { + b.mux.RLock() + defer b.mux.RUnlock() + if b.blockedCounter != nil { + b.blockedCounter.Inc() + } +} + +// String returns a name for this target. Use `SetName` to specify a name. 
+func (b *Basic) String() string { + b.mux.RLock() + defer b.mux.RUnlock() + + if b.name != "" { + return b.name + } + return fmt.Sprintf("%T", b.target) +} + +// Start accepts log records via In channel and writes to the +// supplied writer, until Done channel signaled. +func (b *Basic) start() { + defer func() { + if r := recover(); r != nil { + fmt.Fprintln(os.Stderr, "Basic.start -- ", r) + go b.start() + } + }() + + for rec := range b.in { + if rec.flush != nil { + b.flush(rec.flush) + } else { + err := b.w.Write(rec) + if err != nil { + b.incErrorCounter() + rec.Logger().Logr().ReportError(err) + } else { + b.incLoggedCounter() + } + } + } + close(b.done) +} + +// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqSecs` seconds until +// target is closed. +func (b *Basic) startMetricsUpdater() { + for { + updateFreq := b.getMetricsUpdateFreqMillis() + if updateFreq == 0 { + updateFreq = DefMetricsUpdateFreqMillis + } + if updateFreq < 250 { + updateFreq = 250 // don't peg the CPU + } + + select { + case <-b.done: + return + case <-time.After(time.Duration(updateFreq) * time.Millisecond): + b.setQueueSizeGauge(float64(len(b.in))) + } + } +} + +func (b *Basic) getMetricsUpdateFreqMillis() int64 { + b.mux.RLock() + defer b.mux.RUnlock() + return b.metricsUpdateFreqMillis +} + +// flush drains the queue and notifies when done. +func (b *Basic) flush(done chan<- struct{}) { + for { + var rec *LogRec + var err error + select { + case rec = <-b.in: + // ignore any redundant flush records. + if rec.flush == nil { + err = b.w.Write(rec) + if err != nil { + b.incErrorCounter() + rec.Logger().Logr().ReportError(err) + } + } + default: + done <- struct{}{} + return + } + } +} diff --git a/vendor/github.com/mattermost/logr/target/file.go b/vendor/github.com/mattermost/logr/target/file.go new file mode 100644 index 0000000..0fd5076 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target/file.go @@ -0,0 +1,87 @@ +package target + +import ( + "context" + "io" + + "github.com/mattermost/logr" + "github.com/wiggin77/merror" + "gopkg.in/natefinch/lumberjack.v2" +) + +type FileOptions struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool +} + +// File outputs log records to a file which can be log rotated based on size or age. +// Uses `https://github.com/natefinch/lumberjack` for rotation. +type File struct { + logr.Basic + out io.WriteCloser +} + +// NewFileTarget creates a target capable of outputting log records to a rotated file. 
+func NewFileTarget(filter logr.Filter, formatter logr.Formatter, opts FileOptions, maxQueue int) *File { + lumber := &lumberjack.Logger{ + Filename: opts.Filename, + MaxSize: opts.MaxSize, + MaxBackups: opts.MaxBackups, + MaxAge: opts.MaxAge, + Compress: opts.Compress, + } + f := &File{out: lumber} + f.Basic.Start(f, f, filter, formatter, maxQueue) + return f +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to a file. +func (f *File) Write(rec *logr.LogRec) error { + _, stacktrace := f.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := f.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + _, err = f.out.Write(buf.Bytes()) + return err +} + +// Shutdown flushes any remaining log records and closes the file. +func (f *File) Shutdown(ctx context.Context) error { + errs := merror.New() + + err := f.Basic.Shutdown(ctx) + errs.Append(err) + + err = f.out.Close() + errs.Append(err) + + return errs.ErrorOrNil() +} diff --git a/vendor/github.com/mattermost/logr/target/syslog.go b/vendor/github.com/mattermost/logr/target/syslog.go new file mode 100644 index 0000000..1d2013b --- /dev/null +++ b/vendor/github.com/mattermost/logr/target/syslog.go @@ -0,0 +1,89 @@ +// +build !windows,!nacl,!plan9 + +package target + +import ( + "context" + "fmt" + "log/syslog" + + "github.com/mattermost/logr" + "github.com/wiggin77/merror" +) + +// Syslog outputs log records to local or remote syslog. +type Syslog struct { + logr.Basic + w *syslog.Writer +} + +// SyslogParams provides parameters for dialing a syslog daemon. +type SyslogParams struct { + Network string + Raddr string + Priority syslog.Priority + Tag string +} + +// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog. +func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) { + writer, err := syslog.Dial(params.Network, params.Raddr, params.Priority, params.Tag) + if err != nil { + return nil, err + } + + s := &Syslog{w: writer} + s.Basic.Start(s, s, filter, formatter, maxQueue) + + return s, nil +} + +// Shutdown stops processing log records after making best +// effort to flush queue. +func (s *Syslog) Shutdown(ctx context.Context) error { + errs := merror.New() + + err := s.Basic.Shutdown(ctx) + errs.Append(err) + + err = s.w.Close() + errs.Append(err) + + return errs.ErrorOrNil() +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to syslog. +func (s *Syslog) Write(rec *logr.LogRec) error { + _, stacktrace := s.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := s.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + txt := buf.String() + + switch rec.Level() { + case logr.Panic, logr.Fatal: + err = s.w.Crit(txt) + case logr.Error: + err = s.w.Err(txt) + case logr.Warn: + err = s.w.Warning(txt) + case logr.Debug, logr.Trace: + err = s.w.Debug(txt) + default: + // logr.Info plus all custom levels. + err = s.w.Info(txt) + } + + if err != nil { + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("syslog write fail: %w", err)) + // syslog writer will try to reconnect. 
+ } + return err +} diff --git a/vendor/github.com/mattermost/logr/target/writer.go b/vendor/github.com/mattermost/logr/target/writer.go new file mode 100644 index 0000000..2250da5 --- /dev/null +++ b/vendor/github.com/mattermost/logr/target/writer.go @@ -0,0 +1,40 @@ +package target + +import ( + "io" + "io/ioutil" + + "github.com/mattermost/logr" +) + +// Writer outputs log records to any `io.Writer`. +type Writer struct { + logr.Basic + out io.Writer +} + +// NewWriterTarget creates a target capable of outputting log records to an io.Writer. +func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer { + if out == nil { + out = ioutil.Discard + } + w := &Writer{out: out} + w.Basic.Start(w, w, filter, formatter, maxQueue) + return w +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to the io.Writer. +func (w *Writer) Write(rec *logr.LogRec) error { + _, stacktrace := w.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := w.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + _, err = w.out.Write(buf.Bytes()) + return err +} diff --git a/vendor/github.com/mattermost/logr/timeout.go b/vendor/github.com/mattermost/logr/timeout.go new file mode 100644 index 0000000..37737bc --- /dev/null +++ b/vendor/github.com/mattermost/logr/timeout.go @@ -0,0 +1,34 @@ +package logr + +import "github.com/wiggin77/merror" + +// timeoutError is returned from functions that can timeout. +type timeoutError struct { + text string +} + +// newTimeoutError returns a TimeoutError. +func newTimeoutError(text string) timeoutError { + return timeoutError{text: text} +} + +// IsTimeoutError returns true if err is a TimeoutError. +func IsTimeoutError(err error) bool { + if _, ok := err.(timeoutError); ok { + return true + } + // if a multi-error, return true if any of the errors + // are TimeoutError + if merr, ok := err.(*merror.MError); ok { + for _, e := range merr.Errors() { + if IsTimeoutError(e) { + return true + } + } + } + return false +} + +func (err timeoutError) Error() string { + return err.text +} diff --git a/vendor/github.com/mattermost/mattermost-server/mlog/default.go b/vendor/github.com/mattermost/mattermost-server/mlog/default.go deleted file mode 100644 index 366d22f..0000000 --- a/vendor/github.com/mattermost/mattermost-server/mlog/default.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package mlog - -import ( - "encoding/json" - "fmt" -) - -// defaultLog manually encodes the log to STDOUT, providing a basic, default logging implementation -// before mlog is fully configured. -func defaultLog(level, msg string, fields ...Field) { - log := struct { - Level string `json:"level"` - Message string `json:"msg"` - Fields []Field `json:"fields,omitempty"` - }{ - level, - msg, - fields, - } - - if b, err := json.Marshal(log); err != nil { - fmt.Printf(`{"level":"error","msg":"failed to encode log message"}%s`, "\n") - } else { - fmt.Printf("%s\n", b) - } -} - -func defaultDebugLog(msg string, fields ...Field) { - defaultLog("debug", msg, fields...) -} - -func defaultInfoLog(msg string, fields ...Field) { - defaultLog("info", msg, fields...) -} - -func defaultWarnLog(msg string, fields ...Field) { - defaultLog("warn", msg, fields...) 
-} - -func defaultErrorLog(msg string, fields ...Field) { - defaultLog("error", msg, fields...) -} - -func defaultCriticalLog(msg string, fields ...Field) { - // We map critical to error in zap, so be consistent. - defaultLog("error", msg, fields...) -} diff --git a/vendor/github.com/mattermost/mattermost-server/mlog/global.go b/vendor/github.com/mattermost/mattermost-server/mlog/global.go deleted file mode 100644 index ba90ace..0000000 --- a/vendor/github.com/mattermost/mattermost-server/mlog/global.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package mlog - -import ( - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -var globalLogger *Logger - -func InitGlobalLogger(logger *Logger) { - glob := *logger - glob.zap = glob.zap.WithOptions(zap.AddCallerSkip(1)) - globalLogger = &glob - Debug = globalLogger.Debug - Info = globalLogger.Info - Warn = globalLogger.Warn - Error = globalLogger.Error - Critical = globalLogger.Critical -} - -func RedirectStdLog(logger *Logger) { - zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel) -} - -type LogFunc func(string, ...Field) - -// DON'T USE THIS Modify the level on the app logger -func GloballyDisableDebugLogForTest() { - globalLogger.consoleLevel.SetLevel(zapcore.ErrorLevel) -} - -// DON'T USE THIS Modify the level on the app logger -func GloballyEnableDebugLogForTest() { - globalLogger.consoleLevel.SetLevel(zapcore.DebugLevel) -} - -var Debug LogFunc = defaultDebugLog -var Info LogFunc = defaultInfoLog -var Warn LogFunc = defaultWarnLog -var Error LogFunc = defaultErrorLog -var Critical LogFunc = defaultCriticalLog diff --git a/vendor/github.com/mattermost/mattermost-server/mlog/log.go b/vendor/github.com/mattermost/mattermost-server/mlog/log.go deleted file mode 100644 index 503b140..0000000 --- a/vendor/github.com/mattermost/mattermost-server/mlog/log.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package mlog - -import ( - "io" - "log" - "os" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "gopkg.in/natefinch/lumberjack.v2" -) - -const ( - // Very verbose messages for debugging specific issues - LevelDebug = "debug" - // Default log level, informational - LevelInfo = "info" - // Warnings are messages about possible issues - LevelWarn = "warn" - // Errors are messages about things we know are problems - LevelError = "error" -) - -// Type and function aliases from zap to limit the libraries scope into MM code -type Field = zapcore.Field - -var Int64 = zap.Int64 -var Int = zap.Int -var Uint32 = zap.Uint32 -var String = zap.String -var Any = zap.Any -var Err = zap.Error -var Bool = zap.Bool -var Duration = zap.Duration - -type LoggerConfiguration struct { - EnableConsole bool - ConsoleJson bool - ConsoleLevel string - EnableFile bool - FileJson bool - FileLevel string - FileLocation string -} - -type Logger struct { - zap *zap.Logger - consoleLevel zap.AtomicLevel - fileLevel zap.AtomicLevel -} - -func getZapLevel(level string) zapcore.Level { - switch level { - case LevelInfo: - return zapcore.InfoLevel - case LevelWarn: - return zapcore.WarnLevel - case LevelDebug: - return zapcore.DebugLevel - case LevelError: - return zapcore.ErrorLevel - default: - return zapcore.InfoLevel - } -} - -func makeEncoder(json bool) zapcore.Encoder { - encoderConfig := zap.NewProductionEncoderConfig() - if json { - return zapcore.NewJSONEncoder(encoderConfig) - } - - encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - return zapcore.NewConsoleEncoder(encoderConfig) -} - -func NewLogger(config *LoggerConfiguration) *Logger { - cores := []zapcore.Core{} - logger := &Logger{ - consoleLevel: zap.NewAtomicLevelAt(getZapLevel(config.ConsoleLevel)), - fileLevel: zap.NewAtomicLevelAt(getZapLevel(config.FileLevel)), - } - - if config.EnableConsole { - writer := zapcore.Lock(os.Stdout) - core := zapcore.NewCore(makeEncoder(config.ConsoleJson), writer, logger.consoleLevel) - cores = append(cores, core) - } - - if config.EnableFile { - writer := zapcore.AddSync(&lumberjack.Logger{ - Filename: config.FileLocation, - MaxSize: 100, - Compress: true, - }) - core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel) - cores = append(cores, core) - } - - combinedCore := zapcore.NewTee(cores...) - - logger.zap = zap.New(combinedCore, - zap.AddCaller(), - ) - - return logger -} - -func (l *Logger) ChangeLevels(config *LoggerConfiguration) { - l.consoleLevel.SetLevel(getZapLevel(config.ConsoleLevel)) - l.fileLevel.SetLevel(getZapLevel(config.FileLevel)) -} - -func (l *Logger) SetConsoleLevel(level string) { - l.consoleLevel.SetLevel(getZapLevel(level)) -} - -func (l *Logger) With(fields ...Field) *Logger { - newlogger := *l - newlogger.zap = newlogger.zap.With(fields...) - return &newlogger -} - -func (l *Logger) StdLog(fields ...Field) *log.Logger { - return zap.NewStdLog(l.With(fields...).zap.WithOptions(getStdLogOption())) -} - -// StdLogAt returns *log.Logger which writes to supplied zap logger at required level. 
-func (l *Logger) StdLogAt(level string, fields ...Field) (*log.Logger, error) { - return zap.NewStdLogAt(l.With(fields...).zap.WithOptions(getStdLogOption()), getZapLevel(level)) -} - -// StdLogWriter returns a writer that can be hooked up to the output of a golang standard logger -// anything written will be interpreted as log entries accordingly -func (l *Logger) StdLogWriter() io.Writer { - newLogger := *l - newLogger.zap = newLogger.zap.WithOptions(zap.AddCallerSkip(4), getStdLogOption()) - f := newLogger.Info - return &loggerWriter{f} -} - -func (l *Logger) WithCallerSkip(skip int) *Logger { - newlogger := *l - newlogger.zap = newlogger.zap.WithOptions(zap.AddCallerSkip(skip)) - return &newlogger -} - -// Made for the plugin interface, wraps mlog in a simpler interface -// at the cost of performance -func (l *Logger) Sugar() *SugarLogger { - return &SugarLogger{ - wrappedLogger: l, - zapSugar: l.zap.Sugar(), - } -} - -func (l *Logger) Debug(message string, fields ...Field) { - l.zap.Debug(message, fields...) -} - -func (l *Logger) Info(message string, fields ...Field) { - l.zap.Info(message, fields...) -} - -func (l *Logger) Warn(message string, fields ...Field) { - l.zap.Warn(message, fields...) -} - -func (l *Logger) Error(message string, fields ...Field) { - l.zap.Error(message, fields...) -} - -func (l *Logger) Critical(message string, fields ...Field) { - l.zap.Error(message, fields...) -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/model/channel_search.go deleted file mode 100644 index 7502372..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_search.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -const CHANNEL_SEARCH_DEFAULT_LIMIT = 50 - -type ChannelSearch struct { - Term string `json:"term"` - ExcludeDefaultChannels bool `json:"exclude_default_channels"` - NotAssociatedToGroup string `json:"not_associated_to_group"` -} - -// ToJson convert a Channel to a json string -func (c *ChannelSearch) ToJson() string { - b, _ := json.Marshal(c) - return string(b) -} - -// ChannelSearchFromJson will decode the input and return a Channel -func ChannelSearchFromJson(data io.Reader) *ChannelSearch { - var cs *ChannelSearch - json.NewDecoder(data).Decode(&cs) - return cs -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/model/command_args.go deleted file mode 100644 index 67ea018..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/command_args.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - - goi18n "github.com/mattermost/go-i18n/i18n" -) - -type CommandArgs struct { - UserId string `json:"user_id"` - ChannelId string `json:"channel_id"` - TeamId string `json:"team_id"` - RootId string `json:"root_id"` - ParentId string `json:"parent_id"` - TriggerId string `json:"trigger_id,omitempty"` - Command string `json:"command"` - SiteURL string `json:"-"` - T goi18n.TranslateFunc `json:"-"` - Session Session `json:"-"` -} - -func (o *CommandArgs) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func CommandArgsFromJson(data io.Reader) *CommandArgs { - var o *CommandArgs - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/model/file_info.go deleted file mode 100644 index 65555b6..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/file_info.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "bytes" - "encoding/json" - "image" - "image/gif" - "io" - "mime" - "net/http" - "path/filepath" - "strings" -) - -type FileInfo struct { - Id string `json:"id"` - CreatorId string `json:"user_id"` - PostId string `json:"post_id,omitempty"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - Path string `json:"-"` // not sent back to the client - ThumbnailPath string `json:"-"` // not sent back to the client - PreviewPath string `json:"-"` // not sent back to the client - Name string `json:"name"` - Extension string `json:"extension"` - Size int64 `json:"size"` - MimeType string `json:"mime_type"` - Width int `json:"width,omitempty"` - Height int `json:"height,omitempty"` - HasPreviewImage bool `json:"has_preview_image,omitempty"` -} - -func (info *FileInfo) ToJson() string { - b, _ := json.Marshal(info) - return string(b) -} - -func FileInfoFromJson(data io.Reader) *FileInfo { - decoder := json.NewDecoder(data) - - var info FileInfo - if err := decoder.Decode(&info); err != nil { - return nil - } else { - return &info - } -} - -func FileInfosToJson(infos []*FileInfo) string { - b, _ := json.Marshal(infos) - return string(b) -} - -func FileInfosFromJson(data io.Reader) []*FileInfo { - decoder := json.NewDecoder(data) - - var infos []*FileInfo - if err := decoder.Decode(&infos); err != nil { - return nil - } else { - return infos - } -} - -func (o *FileInfo) PreSave() { - if o.Id == "" { - o.Id = NewId() - } - - if o.CreateAt == 0 { - o.CreateAt = GetMillis() - } - - if o.UpdateAt < o.CreateAt { - o.UpdateAt = o.CreateAt - } -} - -func (o *FileInfo) IsValid() *AppError { - if len(o.Id) != 26 { - return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "", http.StatusBadRequest) - } - - if len(o.CreatorId) != 26 && o.CreatorId != "nouser" { - return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) - } - - if len(o.PostId) != 0 && len(o.PostId) != 26 { - return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.post_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) - } - - if o.CreateAt == 0 { - return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) - } - - if o.UpdateAt == 0 { - return NewAppError("FileInfo.IsValid", 
"model.file_info.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) - } - - if o.Path == "" { - return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.path.app_error", nil, "id="+o.Id, http.StatusBadRequest) - } - - return nil -} - -func (o *FileInfo) IsImage() bool { - return strings.HasPrefix(o.MimeType, "image") -} - -func NewInfo(name string) *FileInfo { - info := &FileInfo{ - Name: name, - } - - extension := strings.ToLower(filepath.Ext(name)) - info.MimeType = mime.TypeByExtension(extension) - - if extension != "" && extension[0] == '.' { - // The client expects a file extension without the leading period - info.Extension = extension[1:] - } else { - info.Extension = extension - } - - return info -} - -func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) { - info := &FileInfo{ - Name: name, - Size: int64(len(data)), - } - var err *AppError - - extension := strings.ToLower(filepath.Ext(name)) - info.MimeType = mime.TypeByExtension(extension) - - if extension != "" && extension[0] == '.' { - // The client expects a file extension without the leading period - info.Extension = extension[1:] - } else { - info.Extension = extension - } - - if info.IsImage() { - // Only set the width and height if it's actually an image that we can understand - if config, _, err := image.DecodeConfig(bytes.NewReader(data)); err == nil { - info.Width = config.Width - info.Height = config.Height - - if info.MimeType == "image/gif" { - // Just show the gif itself instead of a preview image for animated gifs - if gifConfig, err := gif.DecodeAll(bytes.NewReader(data)); err != nil { - // Still return the rest of the info even though it doesn't appear to be an actual gif - info.HasPreviewImage = true - err = NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest) - } else { - info.HasPreviewImage = len(gifConfig.Image) == 1 - } - } else { - info.HasPreviewImage = true - } - } - } - - return info, err -} - -func GetEtagForFileInfos(infos []*FileInfo) string { - if len(infos) == 0 { - return Etag() - } - - var maxUpdateAt int64 - - for _, info := range infos { - if info.UpdateAt > maxUpdateAt { - maxUpdateAt = info.UpdateAt - } - } - - return Etag(infos[0].PostId, maxUpdateAt) -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/group.go b/vendor/github.com/mattermost/mattermost-server/model/group.go deleted file mode 100644 index 535fbdf..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/group.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - "net/http" -) - -const ( - GroupSourceLdap GroupSource = "ldap" - - GroupNameMaxLength = 64 - GroupSourceMaxLength = 64 - GroupDisplayNameMaxLength = 128 - GroupDescriptionMaxLength = 1024 - GroupRemoteIDMaxLength = 48 -) - -type GroupSource string - -var allGroupSources = []GroupSource{ - GroupSourceLdap, -} - -var groupSourcesRequiringRemoteID = []GroupSource{ - GroupSourceLdap, -} - -type Group struct { - Id string `json:"id"` - Name string `json:"name"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - Source GroupSource `json:"source"` - RemoteId string `json:"remote_id"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - HasSyncables bool `db:"-" json:"has_syncables"` - MemberCount *int `db:"-" json:"member_count,omitempty"` -} - -type GroupPatch struct { - Name *string `json:"name"` - DisplayName *string `json:"display_name"` - Description *string `json:"description"` -} - -type LdapGroupSearchOpts struct { - Q string - IsLinked *bool - IsConfigured *bool -} - -type GroupSearchOpts struct { - Q string - NotAssociatedToTeam string - NotAssociatedToChannel string - IncludeMemberCount bool - PageOpts *PageOpts -} - -type PageOpts struct { - Page int - PerPage int -} - -func (group *Group) Patch(patch *GroupPatch) { - if patch.Name != nil { - group.Name = *patch.Name - } - if patch.DisplayName != nil { - group.DisplayName = *patch.DisplayName - } - if patch.Description != nil { - group.Description = *patch.Description - } -} - -func (group *Group) IsValidForCreate() *AppError { - if l := len(group.Name); l == 0 || l > GroupNameMaxLength { - return NewAppError("Group.IsValidForCreate", "model.group.name.app_error", map[string]interface{}{"GroupNameMaxLength": GroupNameMaxLength}, "", http.StatusBadRequest) - } - - if l := len(group.DisplayName); l == 0 || l > GroupDisplayNameMaxLength { - return NewAppError("Group.IsValidForCreate", "model.group.display_name.app_error", map[string]interface{}{"GroupDisplayNameMaxLength": GroupDisplayNameMaxLength}, "", http.StatusBadRequest) - } - - if len(group.Description) > GroupDescriptionMaxLength { - return NewAppError("Group.IsValidForCreate", "model.group.description.app_error", map[string]interface{}{"GroupDescriptionMaxLength": GroupDescriptionMaxLength}, "", http.StatusBadRequest) - } - - isValidSource := false - for _, groupSource := range allGroupSources { - if group.Source == groupSource { - isValidSource = true - break - } - } - if !isValidSource { - return NewAppError("Group.IsValidForCreate", "model.group.source.app_error", nil, "", http.StatusBadRequest) - } - - if len(group.RemoteId) > GroupRemoteIDMaxLength || (len(group.RemoteId) == 0 && group.requiresRemoteId()) { - return NewAppError("Group.IsValidForCreate", "model.group.remote_id.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (group *Group) requiresRemoteId() bool { - for _, groupSource := range groupSourcesRequiringRemoteID { - if groupSource == group.Source { - return true - } - } - return false -} - -func (group *Group) IsValidForUpdate() *AppError { - if len(group.Id) != 26 { - return NewAppError("Group.IsValidForUpdate", "model.group.id.app_error", nil, "", http.StatusBadRequest) - } - if group.CreateAt == 0 { - return NewAppError("Group.IsValidForUpdate", "model.group.create_at.app_error", nil, "", http.StatusBadRequest) - } - if group.UpdateAt == 0 { - return NewAppError("Group.IsValidForUpdate", 
"model.group.update_at.app_error", nil, "", http.StatusBadRequest) - } - if err := group.IsValidForCreate(); err != nil { - return err - } - return nil -} - -func GroupFromJson(data io.Reader) *Group { - var group *Group - json.NewDecoder(data).Decode(&group) - return group -} - -func GroupsFromJson(data io.Reader) []*Group { - var groups []*Group - json.NewDecoder(data).Decode(&groups) - return groups -} - -func GroupPatchFromJson(data io.Reader) *GroupPatch { - var groupPatch *GroupPatch - json.NewDecoder(data).Decode(&groupPatch) - return groupPatch -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/model/ldap.go deleted file mode 100644 index 9051c5a..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/ldap.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -const ( - USER_AUTH_SERVICE_LDAP = "ldap" -) diff --git a/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go b/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go deleted file mode 100644 index 23a903c..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type MfaSecret struct { - Secret string `json:"secret"` - QRCode string `json:"qr_code"` -} - -func (me *MfaSecret) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func MfaSecretFromJson(data io.Reader) *MfaSecret { - var me *MfaSecret - json.NewDecoder(data).Decode(&me) - return me -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/migration.go b/vendor/github.com/mattermost/mattermost-server/model/migration.go deleted file mode 100644 index ead7acc..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/migration.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -const ( - MIGRATION_KEY_ADVANCED_PERMISSIONS_PHASE_2 = "migration_advanced_permissions_phase_2" -) diff --git a/vendor/github.com/mattermost/mattermost-server/model/push_notification.go b/vendor/github.com/mattermost/mattermost-server/model/push_notification.go deleted file mode 100644 index 03b49bc..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/push_notification.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - "strings" -) - -const ( - PUSH_NOTIFY_APPLE = "apple" - PUSH_NOTIFY_ANDROID = "android" - PUSH_NOTIFY_APPLE_REACT_NATIVE = "apple_rn" - PUSH_NOTIFY_ANDROID_REACT_NATIVE = "android_rn" - - PUSH_TYPE_MESSAGE = "message" - PUSH_TYPE_CLEAR = "clear" - PUSH_MESSAGE_V2 = "v2" - - // The category is set to handle a set of interactive Actions - // with the push notifications - CATEGORY_CAN_REPLY = "CAN_REPLY" - - MHPNS = "https://push.mattermost.com" - - PUSH_SEND_PREPARE = "Prepared to send" - PUSH_SEND_SUCCESS = "Successful" - PUSH_NOT_SENT = "Not Sent due to preferences" - PUSH_RECEIVED = "Received by device" -) - -type PushNotificationAck struct { - Id string `json:"id"` - ClientReceivedAt int64 `json:"received_at"` - ClientPlatform string `json:"platform"` - NotificationType string `json:"type"` -} - -type PushNotification struct { - AckId string `json:"ack_id"` - Platform string `json:"platform"` - ServerId string `json:"server_id"` - DeviceId string `json:"device_id"` - Category string `json:"category"` - Sound string `json:"sound"` - Message string `json:"message"` - Badge int `json:"badge"` - ContentAvailable int `json:"cont_ava"` - TeamId string `json:"team_id"` - ChannelId string `json:"channel_id"` - PostId string `json:"post_id"` - RootId string `json:"root_id"` - ChannelName string `json:"channel_name"` - Type string `json:"type"` - SenderId string `json:"sender_id"` - SenderName string `json:"sender_name"` - OverrideUsername string `json:"override_username"` - OverrideIconUrl string `json:"override_icon_url"` - FromWebhook string `json:"from_webhook"` - Version string `json:"version"` -} - -func (me *PushNotification) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func (me *PushNotification) SetDeviceIdAndPlatform(deviceId string) { - - index := strings.Index(deviceId, ":") - - if index > -1 { - me.Platform = deviceId[:index] - me.DeviceId = deviceId[index+1:] - } -} - -func PushNotificationFromJson(data io.Reader) *PushNotification { - var me *PushNotification - json.NewDecoder(data).Decode(&me) - return me -} - -func PushNotificationAckFromJson(data io.Reader) *PushNotificationAck { - var ack *PushNotificationAck - json.NewDecoder(data).Decode(&ack) - return ack -} - -func (ack *PushNotificationAck) ToJson() string { - b, _ := json.Marshal(ack) - return string(b) -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/role.go b/vendor/github.com/mattermost/mattermost-server/model/role.go deleted file mode 100644 index 86dcd85..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/role.go +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - "strings" -) - -const ( - SYSTEM_GUEST_ROLE_ID = "system_guest" - SYSTEM_USER_ROLE_ID = "system_user" - SYSTEM_ADMIN_ROLE_ID = "system_admin" - SYSTEM_POST_ALL_ROLE_ID = "system_post_all" - SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public" - SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token" - - TEAM_GUEST_ROLE_ID = "team_guest" - TEAM_USER_ROLE_ID = "team_user" - TEAM_ADMIN_ROLE_ID = "team_admin" - TEAM_POST_ALL_ROLE_ID = "team_post_all" - TEAM_POST_ALL_PUBLIC_ROLE_ID = "team_post_all_public" - - CHANNEL_GUEST_ROLE_ID = "channel_guest" - CHANNEL_USER_ROLE_ID = "channel_user" - CHANNEL_ADMIN_ROLE_ID = "channel_admin" - - ROLE_NAME_MAX_LENGTH = 64 - ROLE_DISPLAY_NAME_MAX_LENGTH = 128 - ROLE_DESCRIPTION_MAX_LENGTH = 1024 -) - -type Role struct { - Id string `json:"id"` - Name string `json:"name"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` - Permissions []string `json:"permissions"` - SchemeManaged bool `json:"scheme_managed"` - BuiltIn bool `json:"built_in"` -} - -type RolePatch struct { - Permissions *[]string `json:"permissions"` -} - -func (role *Role) ToJson() string { - b, _ := json.Marshal(role) - return string(b) -} - -func RoleFromJson(data io.Reader) *Role { - var role *Role - json.NewDecoder(data).Decode(&role) - return role -} - -func RoleListToJson(r []*Role) string { - b, _ := json.Marshal(r) - return string(b) -} - -func RoleListFromJson(data io.Reader) []*Role { - var roles []*Role - json.NewDecoder(data).Decode(&roles) - return roles -} - -func (r *RolePatch) ToJson() string { - b, _ := json.Marshal(r) - return string(b) -} - -func RolePatchFromJson(data io.Reader) *RolePatch { - var rolePatch *RolePatch - json.NewDecoder(data).Decode(&rolePatch) - return rolePatch -} - -func (o *Role) Patch(patch *RolePatch) { - if patch.Permissions != nil { - o.Permissions = *patch.Permissions - } -} - -// Returns an array of permissions that are in either role.Permissions -// or patch.Permissions, but not both. 
-func PermissionsChangedByPatch(role *Role, patch *RolePatch) []string { - var result []string - - if patch.Permissions == nil { - return result - } - - roleMap := make(map[string]bool) - patchMap := make(map[string]bool) - - for _, permission := range role.Permissions { - roleMap[permission] = true - } - - for _, permission := range *patch.Permissions { - patchMap[permission] = true - } - - for _, permission := range role.Permissions { - if !patchMap[permission] { - result = append(result, permission) - } - } - - for _, permission := range *patch.Permissions { - if !roleMap[permission] { - result = append(result, permission) - } - } - - return result -} - -func (role *Role) IsValid() bool { - if len(role.Id) != 26 { - return false - } - - return role.IsValidWithoutId() -} - -func (role *Role) IsValidWithoutId() bool { - if !IsValidRoleName(role.Name) { - return false - } - - if len(role.DisplayName) == 0 || len(role.DisplayName) > ROLE_DISPLAY_NAME_MAX_LENGTH { - return false - } - - if len(role.Description) > ROLE_DESCRIPTION_MAX_LENGTH { - return false - } - - for _, permission := range role.Permissions { - permissionValidated := false - for _, p := range ALL_PERMISSIONS { - if permission == p.Id { - permissionValidated = true - break - } - } - - if !permissionValidated { - return false - } - } - - return true -} - -func IsValidRoleName(roleName string) bool { - if len(roleName) <= 0 || len(roleName) > ROLE_NAME_MAX_LENGTH { - return false - } - - if strings.TrimLeft(roleName, "abcdefghijklmnopqrstuvwxyz0123456789_") != "" { - return false - } - - return true -} - -func MakeDefaultRoles() map[string]*Role { - roles := make(map[string]*Role) - - roles[CHANNEL_GUEST_ROLE_ID] = &Role{ - Name: "channel_guest", - DisplayName: "authentication.roles.channel_guest.name", - Description: "authentication.roles.channel_guest.description", - Permissions: []string{ - PERMISSION_READ_CHANNEL.Id, - PERMISSION_ADD_REACTION.Id, - PERMISSION_REMOVE_REACTION.Id, - PERMISSION_UPLOAD_FILE.Id, - PERMISSION_EDIT_POST.Id, - PERMISSION_CREATE_POST.Id, - PERMISSION_USE_SLASH_COMMANDS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[CHANNEL_USER_ROLE_ID] = &Role{ - Name: "channel_user", - DisplayName: "authentication.roles.channel_user.name", - Description: "authentication.roles.channel_user.description", - Permissions: []string{ - PERMISSION_READ_CHANNEL.Id, - PERMISSION_ADD_REACTION.Id, - PERMISSION_REMOVE_REACTION.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, - PERMISSION_UPLOAD_FILE.Id, - PERMISSION_GET_PUBLIC_LINK.Id, - PERMISSION_CREATE_POST.Id, - PERMISSION_USE_SLASH_COMMANDS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[CHANNEL_ADMIN_ROLE_ID] = &Role{ - Name: "channel_admin", - DisplayName: "authentication.roles.channel_admin.name", - Description: "authentication.roles.channel_admin.description", - Permissions: []string{ - PERMISSION_MANAGE_CHANNEL_ROLES.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[TEAM_GUEST_ROLE_ID] = &Role{ - Name: "team_guest", - DisplayName: "authentication.roles.team_guest.name", - Description: "authentication.roles.team_guest.description", - Permissions: []string{ - PERMISSION_VIEW_TEAM.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[TEAM_USER_ROLE_ID] = &Role{ - Name: "team_user", - DisplayName: "authentication.roles.team_user.name", - Description: "authentication.roles.team_user.description", - Permissions: []string{ - PERMISSION_LIST_TEAM_CHANNELS.Id, - PERMISSION_JOIN_PUBLIC_CHANNELS.Id, - 
PERMISSION_READ_PUBLIC_CHANNEL.Id, - PERMISSION_VIEW_TEAM.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[TEAM_POST_ALL_ROLE_ID] = &Role{ - Name: "team_post_all", - DisplayName: "authentication.roles.team_post_all.name", - Description: "authentication.roles.team_post_all.description", - Permissions: []string{ - PERMISSION_CREATE_POST.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[TEAM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ - Name: "team_post_all_public", - DisplayName: "authentication.roles.team_post_all_public.name", - Description: "authentication.roles.team_post_all_public.description", - Permissions: []string{ - PERMISSION_CREATE_POST_PUBLIC.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[TEAM_ADMIN_ROLE_ID] = &Role{ - Name: "team_admin", - DisplayName: "authentication.roles.team_admin.name", - Description: "authentication.roles.team_admin.description", - Permissions: []string{ - PERMISSION_REMOVE_USER_FROM_TEAM.Id, - PERMISSION_MANAGE_TEAM.Id, - PERMISSION_IMPORT_TEAM.Id, - PERMISSION_MANAGE_TEAM_ROLES.Id, - PERMISSION_MANAGE_CHANNEL_ROLES.Id, - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id, - PERMISSION_MANAGE_SLASH_COMMANDS.Id, - PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id, - PERMISSION_MANAGE_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OUTGOING_WEBHOOKS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[SYSTEM_GUEST_ROLE_ID] = &Role{ - Name: "system_guest", - DisplayName: "authentication.roles.global_guest.name", - Description: "authentication.roles.global_guest.description", - Permissions: []string{ - PERMISSION_CREATE_DIRECT_CHANNEL.Id, - PERMISSION_CREATE_GROUP_CHANNEL.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[SYSTEM_USER_ROLE_ID] = &Role{ - Name: "system_user", - DisplayName: "authentication.roles.global_user.name", - Description: "authentication.roles.global_user.description", - Permissions: []string{ - PERMISSION_LIST_PUBLIC_TEAMS.Id, - PERMISSION_JOIN_PUBLIC_TEAMS.Id, - PERMISSION_CREATE_DIRECT_CHANNEL.Id, - PERMISSION_CREATE_GROUP_CHANNEL.Id, - PERMISSION_VIEW_MEMBERS.Id, - }, - SchemeManaged: true, - BuiltIn: true, - } - - roles[SYSTEM_POST_ALL_ROLE_ID] = &Role{ - Name: "system_post_all", - DisplayName: "authentication.roles.system_post_all.name", - Description: "authentication.roles.system_post_all.description", - Permissions: []string{ - PERMISSION_CREATE_POST.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[SYSTEM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ - Name: "system_post_all_public", - DisplayName: "authentication.roles.system_post_all_public.name", - Description: "authentication.roles.system_post_all_public.description", - Permissions: []string{ - PERMISSION_CREATE_POST_PUBLIC.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[SYSTEM_USER_ACCESS_TOKEN_ROLE_ID] = &Role{ - Name: "system_user_access_token", - DisplayName: "authentication.roles.system_user_access_token.name", - Description: "authentication.roles.system_user_access_token.description", - Permissions: []string{ - PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, - PERMISSION_READ_USER_ACCESS_TOKEN.Id, - PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, - }, - SchemeManaged: false, - BuiltIn: true, - } - - roles[SYSTEM_ADMIN_ROLE_ID] = &Role{ - Name: "system_admin", - DisplayName: "authentication.roles.global_admin.name", - Description: "authentication.roles.global_admin.description", - // System admins can do anything channel and team admins can do - // plus everything members of teams 
and channels can do to all teams - // and channels on the system - Permissions: append( - append( - append( - append( - []string{ - PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE.Id, - PERMISSION_MANAGE_SYSTEM.Id, - PERMISSION_MANAGE_ROLES.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES.Id, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id, - PERMISSION_DELETE_PUBLIC_CHANNEL.Id, - PERMISSION_CREATE_PUBLIC_CHANNEL.Id, - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES.Id, - PERMISSION_DELETE_PRIVATE_CHANNEL.Id, - PERMISSION_CREATE_PRIVATE_CHANNEL.Id, - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH.Id, - PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id, - PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id, - PERMISSION_EDIT_OTHER_USERS.Id, - PERMISSION_EDIT_OTHERS_POSTS.Id, - PERMISSION_MANAGE_OAUTH.Id, - PERMISSION_INVITE_USER.Id, - PERMISSION_DELETE_POST.Id, - PERMISSION_DELETE_OTHERS_POSTS.Id, - PERMISSION_CREATE_TEAM.Id, - PERMISSION_ADD_USER_TO_TEAM.Id, - PERMISSION_LIST_USERS_WITHOUT_TEAM.Id, - PERMISSION_MANAGE_JOBS.Id, - PERMISSION_CREATE_POST_PUBLIC.Id, - PERMISSION_CREATE_POST_EPHEMERAL.Id, - PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, - PERMISSION_READ_USER_ACCESS_TOKEN.Id, - PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, - PERMISSION_CREATE_BOT.Id, - PERMISSION_READ_BOTS.Id, - PERMISSION_READ_OTHERS_BOTS.Id, - PERMISSION_MANAGE_BOTS.Id, - PERMISSION_MANAGE_OTHERS_BOTS.Id, - PERMISSION_REMOVE_OTHERS_REACTIONS.Id, - PERMISSION_LIST_PRIVATE_TEAMS.Id, - PERMISSION_JOIN_PRIVATE_TEAMS.Id, - PERMISSION_VIEW_MEMBERS.Id, - }, - roles[TEAM_USER_ROLE_ID].Permissions..., - ), - roles[CHANNEL_USER_ROLE_ID].Permissions..., - ), - roles[TEAM_ADMIN_ROLE_ID].Permissions..., - ), - roles[CHANNEL_ADMIN_ROLE_ID].Permissions..., - ), - SchemeManaged: true, - BuiltIn: true, - } - - return roles -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/saml.go b/vendor/github.com/mattermost/mattermost-server/model/saml.go deleted file mode 100644 index c184b03..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/saml.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -const ( - USER_AUTH_SERVICE_SAML = "saml" - USER_AUTH_SERVICE_SAML_TEXT = "SAML" -) - -type SamlAuthRequest struct { - Base64AuthRequest string - URL string - RelayState string -} - -type SamlCertificateStatus struct { - IdpCertificateFile bool `json:"idp_certificate_file"` - PrivateKeyFile bool `json:"private_key_file"` - PublicCertificateFile bool `json:"public_certificate_file"` -} - -func (s *SamlCertificateStatus) ToJson() string { - b, _ := json.Marshal(s) - return string(b) -} - -func SamlCertificateStatusFromJson(data io.Reader) *SamlCertificateStatus { - var status *SamlCertificateStatus - json.NewDecoder(data).Decode(&status) - return status -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/model/search_params.go deleted file mode 100644 index 261c0de..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/search_params.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -import ( - "regexp" - "strings" - "time" -) - -var searchTermPuncStart = regexp.MustCompile(`^[^\pL\d\s#"]+`) -var searchTermPuncEnd = regexp.MustCompile(`[^\pL\d\s*"]+$`) - -type SearchParams struct { - Terms string - IsHashtag bool - InChannels []string - FromUsers []string - AfterDate string - BeforeDate string - OnDate string - OrTerms bool - IncludeDeletedChannels bool - TimeZoneOffset int - // True if this search doesn't originate from a "current user". - SearchWithoutUserId bool -} - -// Returns the epoch timestamp of the start of the day specified by SearchParams.AfterDate -func (p *SearchParams) GetAfterDateMillis() int64 { - date, err := time.Parse("2006-01-02", PadDateStringZeros(p.AfterDate)) - if err != nil { - date = time.Now() - } - - // travel forward 1 day - oneDay := time.Hour * 24 - afterDate := date.Add(oneDay) - return GetStartOfDayMillis(afterDate, p.TimeZoneOffset) -} - -// Returns the epoch timestamp of the end of the day specified by SearchParams.BeforeDate -func (p *SearchParams) GetBeforeDateMillis() int64 { - date, err := time.Parse("2006-01-02", PadDateStringZeros(p.BeforeDate)) - if err != nil { - return 0 - } - - // travel back 1 day - oneDay := time.Hour * -24 - beforeDate := date.Add(oneDay) - return GetEndOfDayMillis(beforeDate, p.TimeZoneOffset) -} - -// Returns the epoch timestamps of the start and end of the day specified by SearchParams.OnDate -func (p *SearchParams) GetOnDateMillis() (int64, int64) { - date, err := time.Parse("2006-01-02", PadDateStringZeros(p.OnDate)) - if err != nil { - return 0, 0 - } - - return GetStartOfDayMillis(date, p.TimeZoneOffset), GetEndOfDayMillis(date, p.TimeZoneOffset) -} - -var searchFlags = [...]string{"from", "channel", "in", "before", "after", "on"} - -func splitWords(text string) []string { - words := []string{} - - foundQuote := false - location := 0 - for i, char := range text { - if char == '"' { - if foundQuote { - // Grab the quoted section - word := text[location : i+1] - words = append(words, word) - foundQuote = false - location = i + 1 - } else { - words = append(words, strings.Fields(text[location:i])...) - foundQuote = true - location = i - } - } - } - - words = append(words, strings.Fields(text[location:])...) 
- - return words -} - -func parseSearchFlags(input []string) ([]string, [][2]string) { - words := []string{} - flags := [][2]string{} - - skipNextWord := false - for i, word := range input { - if skipNextWord { - skipNextWord = false - continue - } - - isFlag := false - - if colon := strings.Index(word, ":"); colon != -1 { - flag := word[:colon] - value := word[colon+1:] - - for _, searchFlag := range searchFlags { - // check for case insensitive equality - if strings.EqualFold(flag, searchFlag) { - if value != "" { - flags = append(flags, [2]string{searchFlag, value}) - isFlag = true - } else if i < len(input)-1 { - flags = append(flags, [2]string{searchFlag, input[i+1]}) - skipNextWord = true - isFlag = true - } - - if isFlag { - break - } - } - } - } - - if !isFlag { - // trim off surrounding punctuation (note that we leave trailing asterisks to allow wildcards) - word = searchTermPuncStart.ReplaceAllString(word, "") - word = searchTermPuncEnd.ReplaceAllString(word, "") - - // and remove extra pound #s - word = hashtagStart.ReplaceAllString(word, "#") - - if len(word) != 0 { - words = append(words, word) - } - } - } - - return words, flags -} - -func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { - words, flags := parseSearchFlags(splitWords(text)) - - hashtagTermList := []string{} - plainTermList := []string{} - - for _, word := range words { - if validHashtag.MatchString(word) { - hashtagTermList = append(hashtagTermList, word) - } else { - plainTermList = append(plainTermList, word) - } - } - - hashtagTerms := strings.Join(hashtagTermList, " ") - plainTerms := strings.Join(plainTermList, " ") - - inChannels := []string{} - fromUsers := []string{} - afterDate := "" - beforeDate := "" - onDate := "" - - for _, flagPair := range flags { - flag := flagPair[0] - value := flagPair[1] - - if flag == "in" || flag == "channel" { - inChannels = append(inChannels, value) - } else if flag == "from" { - fromUsers = append(fromUsers, value) - } else if flag == "after" { - afterDate = value - } else if flag == "before" { - beforeDate = value - } else if flag == "on" { - onDate = value - } - } - - paramsList := []*SearchParams{} - - if len(plainTerms) > 0 { - paramsList = append(paramsList, &SearchParams{ - Terms: plainTerms, - IsHashtag: false, - InChannels: inChannels, - FromUsers: fromUsers, - AfterDate: afterDate, - BeforeDate: beforeDate, - OnDate: onDate, - TimeZoneOffset: timeZoneOffset, - }) - } - - if len(hashtagTerms) > 0 { - paramsList = append(paramsList, &SearchParams{ - Terms: hashtagTerms, - IsHashtag: true, - InChannels: inChannels, - FromUsers: fromUsers, - AfterDate: afterDate, - BeforeDate: beforeDate, - OnDate: onDate, - TimeZoneOffset: timeZoneOffset, - }) - } - - // special case for when no terms are specified but we still have a filter - if len(plainTerms) == 0 && len(hashtagTerms) == 0 && (len(inChannels) != 0 || len(fromUsers) != 0 || len(afterDate) != 0 || len(beforeDate) != 0 || len(onDate) != 0) { - paramsList = append(paramsList, &SearchParams{ - Terms: "", - IsHashtag: false, - InChannels: inChannels, - FromUsers: fromUsers, - AfterDate: afterDate, - BeforeDate: beforeDate, - OnDate: onDate, - TimeZoneOffset: timeZoneOffset, - }) - } - - return paramsList -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/session.go b/vendor/github.com/mattermost/mattermost-server/model/session.go deleted file mode 100644 index 2e8dba4..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/session.go +++ /dev/null @@ -1,167 +0,0 @@ 
-// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "io" - "strings" -) - -const ( - SESSION_COOKIE_TOKEN = "MMAUTHTOKEN" - SESSION_COOKIE_USER = "MMUSERID" - SESSION_COOKIE_CSRF = "MMCSRF" - SESSION_CACHE_SIZE = 35000 - SESSION_PROP_PLATFORM = "platform" - SESSION_PROP_OS = "os" - SESSION_PROP_BROWSER = "browser" - SESSION_PROP_TYPE = "type" - SESSION_PROP_USER_ACCESS_TOKEN_ID = "user_access_token_id" - SESSION_PROP_IS_BOT = "is_bot" - SESSION_PROP_IS_BOT_VALUE = "true" - SESSION_TYPE_USER_ACCESS_TOKEN = "UserAccessToken" - SESSION_ACTIVITY_TIMEOUT = 1000 * 60 * 5 // 5 minutes - SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years -) - -type Session struct { - Id string `json:"id"` - Token string `json:"token"` - CreateAt int64 `json:"create_at"` - ExpiresAt int64 `json:"expires_at"` - LastActivityAt int64 `json:"last_activity_at"` - UserId string `json:"user_id"` - DeviceId string `json:"device_id"` - Roles string `json:"roles"` - IsOAuth bool `json:"is_oauth"` - Props StringMap `json:"props"` - TeamMembers []*TeamMember `json:"team_members" db:"-"` -} - -func (me *Session) DeepCopy() *Session { - copySession := *me - - if me.Props != nil { - copySession.Props = CopyStringMap(me.Props) - } - - if me.TeamMembers != nil { - copySession.TeamMembers = make([]*TeamMember, len(me.TeamMembers)) - for index, tm := range me.TeamMembers { - copySession.TeamMembers[index] = new(TeamMember) - *copySession.TeamMembers[index] = *tm - } - } - - return ©Session -} - -func (me *Session) ToJson() string { - b, _ := json.Marshal(me) - return string(b) -} - -func SessionFromJson(data io.Reader) *Session { - var me *Session - json.NewDecoder(data).Decode(&me) - return me -} - -func (me *Session) PreSave() { - if me.Id == "" { - me.Id = NewId() - } - - if me.Token == "" { - me.Token = NewId() - } - - me.CreateAt = GetMillis() - me.LastActivityAt = me.CreateAt - - if me.Props == nil { - me.Props = make(map[string]string) - } -} - -func (me *Session) Sanitize() { - me.Token = "" -} - -func (me *Session) IsExpired() bool { - - if me.ExpiresAt <= 0 { - return false - } - - if GetMillis() > me.ExpiresAt { - return true - } - - return false -} - -func (me *Session) SetExpireInDays(days int) { - if me.CreateAt == 0 { - me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days)) - } else { - me.ExpiresAt = me.CreateAt + (1000 * 60 * 60 * 24 * int64(days)) - } -} - -func (me *Session) AddProp(key string, value string) { - - if me.Props == nil { - me.Props = make(map[string]string) - } - - me.Props[key] = value -} - -func (me *Session) GetTeamByTeamId(teamId string) *TeamMember { - for _, team := range me.TeamMembers { - if team.TeamId == teamId { - return team - } - } - - return nil -} - -func (me *Session) IsMobileApp() bool { - return len(me.DeviceId) > 0 -} - -func (me *Session) GetUserRoles() []string { - return strings.Fields(me.Roles) -} - -func (me *Session) GenerateCSRF() string { - token := NewId() - me.AddProp("csrf", token) - return token -} - -func (me *Session) GetCSRF() string { - if me.Props == nil { - return "" - } - - return me.Props["csrf"] -} - -func SessionsToJson(o []*Session) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func SessionsFromJson(data io.Reader) []*Session { - var o []*Session - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/system.go 
b/vendor/github.com/mattermost/mattermost-server/model/system.go deleted file mode 100644 index 334fc43..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/system.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "io" - "math/big" -) - -const ( - SYSTEM_DIAGNOSTIC_ID = "DiagnosticId" - SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" - SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" - SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" - SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" - SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey" - SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret" - SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate" -) - -type System struct { - Name string `json:"name"` - Value string `json:"value"` -} - -func (o *System) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func SystemFromJson(data io.Reader) *System { - var o *System - json.NewDecoder(data).Decode(&o) - return o -} - -type SystemPostActionCookieSecret struct { - Secret []byte `json:"key,omitempty"` -} - -type SystemAsymmetricSigningKey struct { - ECDSAKey *SystemECDSAKey `json:"ecdsa_key,omitempty"` -} - -type SystemECDSAKey struct { - Curve string `json:"curve"` - X *big.Int `json:"x"` - Y *big.Int `json:"y"` - D *big.Int `json:"d,omitempty"` -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/model/team_member.go deleted file mode 100644 index 4b33c14..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/team_member.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -import ( - "encoding/json" - "io" - "net/http" - "strings" -) - -type TeamMember struct { - TeamId string `json:"team_id"` - UserId string `json:"user_id"` - Roles string `json:"roles"` - DeleteAt int64 `json:"delete_at"` - SchemeGuest bool `json:"scheme_guest"` - SchemeUser bool `json:"scheme_user"` - SchemeAdmin bool `json:"scheme_admin"` - ExplicitRoles string `json:"explicit_roles"` -} - -type TeamUnread struct { - TeamId string `json:"team_id"` - MsgCount int64 `json:"msg_count"` - MentionCount int64 `json:"mention_count"` -} - -type TeamMemberForExport struct { - TeamMember - TeamName string -} - -func (o *TeamMember) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func (o *TeamUnread) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func TeamMemberFromJson(data io.Reader) *TeamMember { - var o *TeamMember - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamUnreadFromJson(data io.Reader) *TeamUnread { - var o *TeamUnread - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamMembersToJson(o []*TeamMember) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func TeamMembersFromJson(data io.Reader) []*TeamMember { - var o []*TeamMember - json.NewDecoder(data).Decode(&o) - return o -} - -func TeamsUnreadToJson(o []*TeamUnread) string { - if b, err := json.Marshal(o); err != nil { - return "[]" - } else { - return string(b) - } -} - -func TeamsUnreadFromJson(data io.Reader) []*TeamUnread { - var o []*TeamUnread - json.NewDecoder(data).Decode(&o) - return o -} - -func (o *TeamMember) IsValid() *AppError { - - if len(o.TeamId) != 26 { - return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) - } - - if len(o.UserId) != 26 { - return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) - } - - return nil -} - -func (o *TeamMember) PreUpdate() { -} - -func (o *TeamMember) GetRoles() []string { - return strings.Fields(o.Roles) -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/model/team_search.go deleted file mode 100644 index e067602..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/team_search.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "io" -) - -type TeamSearch struct { - Term string `json:"term"` -} - -// ToJson convert a TeamSearch to json string -func (c *TeamSearch) ToJson() string { - b, err := json.Marshal(c) - if err != nil { - return "" - } - - return string(b) -} - -// TeamSearchFromJson decodes the input and returns a TeamSearch -func TeamSearchFromJson(data io.Reader) *TeamSearch { - decoder := json.NewDecoder(data) - var cs TeamSearch - err := decoder.Decode(&cs) - if err == nil { - return &cs - } - - return nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_count.go b/vendor/github.com/mattermost/mattermost-server/model/user_count.go deleted file mode 100644 index 03dbc95..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/user_count.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package model - -// Options for counting users -type UserCountOptions struct { - // Should include users that are bots - IncludeBotAccounts bool - // Should include deleted users (of any type) - IncludeDeleted bool - // Exclude regular users - ExcludeRegularUsers bool - // Only include users on a specific team. "" for any team. - TeamId string - // Restrict to search in a list of teams and channels - ViewRestrictions *ViewUsersRestrictions -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go deleted file mode 100644 index 4e6c1d8..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "net/http" - "time" - - "github.com/gorilla/websocket" -) - -const ( - SOCKET_MAX_MESSAGE_SIZE_KB = 8 * 1024 // 8KB - PING_TIMEOUT_BUFFER_SECONDS = 5 -) - -type WebSocketClient struct { - Url string // The location of the server like "ws://localhost:8065" - ApiUrl string // The api location of the server like "ws://localhost:8065/api/v3" - ConnectUrl string // The websocket URL to connect to like "ws://localhost:8065/api/v3/path/to/websocket" - Conn *websocket.Conn // The WebSocket connection - AuthToken string // The token used to open the WebSocket - Sequence int64 // The ever-incrementing sequence attached to each WebSocket action - PingTimeoutChannel chan bool // The channel used to signal ping timeouts - EventChannel chan *WebSocketEvent - ResponseChannel chan *WebSocketResponse - ListenError *AppError - pingTimeoutTimer *time.Timer -} - -// NewWebSocketClient constructs a new WebSocket client with convenience -// methods for talking to the server. -func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) { - return NewWebSocketClientWithDialer(websocket.DefaultDialer, url, authToken) -} - -// NewWebSocketClientWithDialer constructs a new WebSocket client with convenience -// methods for talking to the server using a custom dialer. -func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, *AppError) { - conn, _, err := dialer.Dial(url+API_URL_SUFFIX+"/websocket", nil) - if err != nil { - return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) - } - - client := &WebSocketClient{ - url, - url + API_URL_SUFFIX, - url + API_URL_SUFFIX + "/websocket", - conn, - authToken, - 1, - make(chan bool, 1), - make(chan *WebSocketEvent, 100), - make(chan *WebSocketResponse, 100), - nil, - nil, - } - - client.configurePingHandling() - - client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken}) - - return client, nil -} - -// NewWebSocketClient4 constructs a new WebSocket client with convenience -// methods for talking to the server. Uses the v4 endpoint. -func NewWebSocketClient4(url, authToken string) (*WebSocketClient, *AppError) { - return NewWebSocketClient4WithDialer(websocket.DefaultDialer, url, authToken) -} - -// NewWebSocketClient4WithDialer constructs a new WebSocket client with convenience -// methods for talking to the server using a custom dialer. Uses the v4 endpoint. 
-func NewWebSocketClient4WithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, *AppError) { - return NewWebSocketClientWithDialer(dialer, url, authToken) -} - -func (wsc *WebSocketClient) Connect() *AppError { - return wsc.ConnectWithDialer(websocket.DefaultDialer) -} - -func (wsc *WebSocketClient) ConnectWithDialer(dialer *websocket.Dialer) *AppError { - var err error - wsc.Conn, _, err = dialer.Dial(wsc.ConnectUrl, nil) - if err != nil { - return NewAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) - } - - wsc.configurePingHandling() - - wsc.EventChannel = make(chan *WebSocketEvent, 100) - wsc.ResponseChannel = make(chan *WebSocketResponse, 100) - - wsc.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": wsc.AuthToken}) - - return nil -} - -func (wsc *WebSocketClient) Close() { - wsc.Conn.Close() -} - -func (wsc *WebSocketClient) Listen() { - go func() { - defer func() { - wsc.Conn.Close() - close(wsc.EventChannel) - close(wsc.ResponseChannel) - }() - - for { - var rawMsg json.RawMessage - var err error - if _, rawMsg, err = wsc.Conn.ReadMessage(); err != nil { - if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { - wsc.ListenError = NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) - } - - return - } - - var event WebSocketEvent - if err := json.Unmarshal(rawMsg, &event); err == nil && event.IsValid() { - wsc.EventChannel <- &event - continue - } - - var response WebSocketResponse - if err := json.Unmarshal(rawMsg, &response); err == nil && response.IsValid() { - wsc.ResponseChannel <- &response - continue - } - - } - }() -} - -func (wsc *WebSocketClient) SendMessage(action string, data map[string]interface{}) { - req := &WebSocketRequest{} - req.Seq = wsc.Sequence - req.Action = action - req.Data = data - - wsc.Sequence++ - - wsc.Conn.WriteJSON(req) -} - -// UserTyping will push a user_typing event out to all connected users -// who are in the specified channel -func (wsc *WebSocketClient) UserTyping(channelId, parentId string) { - data := map[string]interface{}{ - "channel_id": channelId, - "parent_id": parentId, - } - - wsc.SendMessage("user_typing", data) -} - -// GetStatuses will return a map of string statuses using user id as the key -func (wsc *WebSocketClient) GetStatuses() { - wsc.SendMessage("get_statuses", nil) -} - -// GetStatusesByIds will fetch certain user statuses based on ids and return -// a map of string statuses using user id as the key -func (wsc *WebSocketClient) GetStatusesByIds(userIds []string) { - data := map[string]interface{}{ - "user_ids": userIds, - } - wsc.SendMessage("get_statuses_by_ids", data) -} - -func (wsc *WebSocketClient) configurePingHandling() { - wsc.Conn.SetPingHandler(wsc.pingHandler) - wsc.pingTimeoutTimer = time.NewTimer(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) - go wsc.pingWatchdog() -} - -func (wsc *WebSocketClient) pingHandler(appData string) error { - if !wsc.pingTimeoutTimer.Stop() { - <-wsc.pingTimeoutTimer.C - } - - wsc.pingTimeoutTimer.Reset(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) - wsc.Conn.WriteMessage(websocket.PongMessage, []byte{}) - return nil -} - -func (wsc *WebSocketClient) pingWatchdog() { - <-wsc.pingTimeoutTimer.C - wsc.PingTimeoutChannel <- true -} diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go 
b/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go deleted file mode 100644 index 132b89a..0000000 --- a/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package model - -import ( - "encoding/json" - "fmt" - "io" -) - -const ( - WEBSOCKET_EVENT_TYPING = "typing" - WEBSOCKET_EVENT_POSTED = "posted" - WEBSOCKET_EVENT_POST_EDITED = "post_edited" - WEBSOCKET_EVENT_POST_DELETED = "post_deleted" - WEBSOCKET_EVENT_CHANNEL_CONVERTED = "channel_converted" - WEBSOCKET_EVENT_CHANNEL_CREATED = "channel_created" - WEBSOCKET_EVENT_CHANNEL_DELETED = "channel_deleted" - WEBSOCKET_EVENT_CHANNEL_UPDATED = "channel_updated" - WEBSOCKET_EVENT_CHANNEL_MEMBER_UPDATED = "channel_member_updated" - WEBSOCKET_EVENT_DIRECT_ADDED = "direct_added" - WEBSOCKET_EVENT_GROUP_ADDED = "group_added" - WEBSOCKET_EVENT_NEW_USER = "new_user" - WEBSOCKET_EVENT_ADDED_TO_TEAM = "added_to_team" - WEBSOCKET_EVENT_LEAVE_TEAM = "leave_team" - WEBSOCKET_EVENT_UPDATE_TEAM = "update_team" - WEBSOCKET_EVENT_DELETE_TEAM = "delete_team" - WEBSOCKET_EVENT_RESTORE_TEAM = "restore_team" - WEBSOCKET_EVENT_USER_ADDED = "user_added" - WEBSOCKET_EVENT_USER_UPDATED = "user_updated" - WEBSOCKET_EVENT_USER_ROLE_UPDATED = "user_role_updated" - WEBSOCKET_EVENT_MEMBERROLE_UPDATED = "memberrole_updated" - WEBSOCKET_EVENT_USER_REMOVED = "user_removed" - WEBSOCKET_EVENT_PREFERENCE_CHANGED = "preference_changed" - WEBSOCKET_EVENT_PREFERENCES_CHANGED = "preferences_changed" - WEBSOCKET_EVENT_PREFERENCES_DELETED = "preferences_deleted" - WEBSOCKET_EVENT_EPHEMERAL_MESSAGE = "ephemeral_message" - WEBSOCKET_EVENT_STATUS_CHANGE = "status_change" - WEBSOCKET_EVENT_HELLO = "hello" - WEBSOCKET_AUTHENTICATION_CHALLENGE = "authentication_challenge" - WEBSOCKET_EVENT_REACTION_ADDED = "reaction_added" - WEBSOCKET_EVENT_REACTION_REMOVED = "reaction_removed" - WEBSOCKET_EVENT_RESPONSE = "response" - WEBSOCKET_EVENT_EMOJI_ADDED = "emoji_added" - WEBSOCKET_EVENT_CHANNEL_VIEWED = "channel_viewed" - WEBSOCKET_EVENT_PLUGIN_STATUSES_CHANGED = "plugin_statuses_changed" - WEBSOCKET_EVENT_PLUGIN_ENABLED = "plugin_enabled" - WEBSOCKET_EVENT_PLUGIN_DISABLED = "plugin_disabled" - WEBSOCKET_EVENT_ROLE_UPDATED = "role_updated" - WEBSOCKET_EVENT_LICENSE_CHANGED = "license_changed" - WEBSOCKET_EVENT_CONFIG_CHANGED = "config_changed" - WEBSOCKET_EVENT_OPEN_DIALOG = "open_dialog" -) - -type WebSocketMessage interface { - ToJson() string - IsValid() bool - EventType() string -} - -type WebsocketBroadcast struct { - OmitUsers map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here - UserId string `json:"user_id"` // broadcast only occurs for this user - ChannelId string `json:"channel_id"` // broadcast only occurs for users in this channel - TeamId string `json:"team_id"` // broadcast only occurs for users in this team - ContainsSanitizedData bool `json:"-"` - ContainsSensitiveData bool `json:"-"` -} - -type precomputedWebSocketEventJSON struct { - Event json.RawMessage - Data json.RawMessage - Broadcast json.RawMessage -} - -type WebSocketEvent struct { - Event string `json:"event"` - Data map[string]interface{} `json:"data"` - Broadcast *WebsocketBroadcast `json:"broadcast"` - Sequence int64 `json:"seq"` - - precomputedJSON *precomputedWebSocketEventJSON -} - -// PrecomputeJSON precomputes and stores the serialized JSON for all fields other than Sequence. 
-// This makes ToJson much more efficient when sending the same event to multiple connections. -func (m *WebSocketEvent) PrecomputeJSON() { - event, _ := json.Marshal(m.Event) - data, _ := json.Marshal(m.Data) - broadcast, _ := json.Marshal(m.Broadcast) - m.precomputedJSON = &precomputedWebSocketEventJSON{ - Event: json.RawMessage(event), - Data: json.RawMessage(data), - Broadcast: json.RawMessage(broadcast), - } -} - -func (m *WebSocketEvent) Add(key string, value interface{}) { - m.Data[key] = value -} - -func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers map[string]bool) *WebSocketEvent { - return &WebSocketEvent{Event: event, Data: make(map[string]interface{}), - Broadcast: &WebsocketBroadcast{TeamId: teamId, ChannelId: channelId, UserId: userId, OmitUsers: omitUsers}} -} - -func (o *WebSocketEvent) IsValid() bool { - return o.Event != "" -} - -func (o *WebSocketEvent) EventType() string { - return o.Event -} - -func (o *WebSocketEvent) ToJson() string { - if o.precomputedJSON != nil { - return fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, o.precomputedJSON.Event, o.precomputedJSON.Data, o.precomputedJSON.Broadcast, o.Sequence) - } - b, _ := json.Marshal(o) - return string(b) -} - -func WebSocketEventFromJson(data io.Reader) *WebSocketEvent { - var o *WebSocketEvent - json.NewDecoder(data).Decode(&o) - return o -} - -type WebSocketResponse struct { - Status string `json:"status"` - SeqReply int64 `json:"seq_reply,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - Error *AppError `json:"error,omitempty"` -} - -func (m *WebSocketResponse) Add(key string, value interface{}) { - m.Data[key] = value -} - -func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse { - return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data} -} - -func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse { - return &WebSocketResponse{Status: STATUS_FAIL, SeqReply: seqReply, Error: err} -} - -func (o *WebSocketResponse) IsValid() bool { - return o.Status != "" -} - -func (o *WebSocketResponse) EventType() string { - return WEBSOCKET_EVENT_RESPONSE -} - -func (o *WebSocketResponse) ToJson() string { - b, _ := json.Marshal(o) - return string(b) -} - -func WebSocketResponseFromJson(data io.Reader) *WebSocketResponse { - var o *WebSocketResponse - json.NewDecoder(data).Decode(&o) - return o -} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/health_check.go b/vendor/github.com/mattermost/mattermost-server/plugin/health_check.go deleted file mode 100644 index 7f9855e..0000000 --- a/vendor/github.com/mattermost/mattermost-server/plugin/health_check.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package plugin - -import ( - "fmt" - "time" - - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" -) - -const ( - HEALTH_CHECK_INTERVAL = 30 * time.Second // How often the health check should run - HEALTH_CHECK_DISABLE_DURATION = 60 * time.Minute // How long we wait for num fails to incur before disabling the plugin - HEALTH_CHECK_PING_FAIL_LIMIT = 3 // How many times we call RPC ping in a row before it is considered a failure - HEALTH_CHECK_RESTART_LIMIT = 3 // How many times we restart a plugin before we disable it -) - -type PluginHealthCheckJob struct { - cancel chan struct{} - cancelled chan struct{} - env *Environment -} - -// InitPluginHealthCheckJob starts a new job if one is not running and is set to enabled, or kills an existing one if set to disabled. -func (env *Environment) InitPluginHealthCheckJob(enable bool) { - // Config is set to enable. No job exists, start a new job. - if enable && env.pluginHealthCheckJob == nil { - mlog.Debug("Enabling plugin health check job", mlog.Duration("interval_s", HEALTH_CHECK_INTERVAL)) - - job := newPluginHealthCheckJob(env) - env.pluginHealthCheckJob = job - job.Start() - } - - // Config is set to disable. Job exists, kill existing job. - if !enable && env.pluginHealthCheckJob != nil { - mlog.Debug("Disabling plugin health check job") - - env.pluginHealthCheckJob.Cancel() - env.pluginHealthCheckJob = nil - } -} - -// Start continuously runs health checks on all active plugins, on a timer. -func (job *PluginHealthCheckJob) Start() { - mlog.Debug("Plugin health check job starting.") - - go func() { - defer close(job.cancelled) - - ticker := time.NewTicker(HEALTH_CHECK_INTERVAL) - defer func() { - ticker.Stop() - }() - - for { - select { - case <-ticker.C: - activePlugins := job.env.Active() - for _, plugin := range activePlugins { - job.checkPlugin(plugin.Manifest.Id) - } - case <-job.cancel: - return - } - } - }() -} - -// checkPlugin determines the plugin's health status, then handles the error or success case. -func (job *PluginHealthCheckJob) checkPlugin(id string) { - p, ok := job.env.registeredPlugins.Load(id) - if !ok { - return - } - rp := p.(*registeredPlugin) - - sup := rp.supervisor - if sup == nil { - return - } - - pluginErr := sup.PerformHealthCheck() - - if pluginErr != nil { - mlog.Error(fmt.Sprintf("Health check failed for plugin %s, error: %s", id, pluginErr.Error())) - job.handleHealthCheckFail(id, pluginErr) - } -} - -// handleHealthCheckFail restarts or deactivates the plugin based on how many times it has failed in a configured amount of time. 
-func (job *PluginHealthCheckJob) handleHealthCheckFail(id string, err error) { - rp, ok := job.env.registeredPlugins.Load(id) - if !ok { - return - } - p := rp.(*registeredPlugin) - - // Append current failure before checking for deactivate vs restart action - p.failTimeStamps = append(p.failTimeStamps, time.Now()) - p.lastError = err - - if shouldDeactivatePlugin(p) { - p.failTimeStamps = []time.Time{} - mlog.Debug(fmt.Sprintf("Deactivating plugin due to multiple crashes `%s`", id)) - job.env.Deactivate(id) - job.env.SetPluginState(id, model.PluginStateFailedToStayRunning) - } else { - mlog.Debug(fmt.Sprintf("Restarting plugin due to failed health check `%s`", id)) - if err := job.env.RestartPlugin(id); err != nil { - mlog.Error(fmt.Sprintf("Failed to restart plugin `%s`: %s", id, err.Error())) - } - } -} - -func newPluginHealthCheckJob(env *Environment) *PluginHealthCheckJob { - return &PluginHealthCheckJob{ - cancel: make(chan struct{}), - cancelled: make(chan struct{}), - env: env, - } -} - -func (job *PluginHealthCheckJob) Cancel() { - close(job.cancel) - <-job.cancelled -} - -// shouldDeactivatePlugin determines if a plugin needs to be deactivated after certain criteria is met. -// -// The criteria is based on if the plugin has consistently failed during the configured number of restarts, within the configured time window. -func shouldDeactivatePlugin(rp *registeredPlugin) bool { - if len(rp.failTimeStamps) >= HEALTH_CHECK_RESTART_LIMIT { - index := len(rp.failTimeStamps) - HEALTH_CHECK_RESTART_LIMIT - t := rp.failTimeStamps[index] - now := time.Now() - elapsed := now.Sub(t) - if elapsed <= HEALTH_CHECK_DISABLE_DURATION { - return true - } - } - return false -} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/helpers.go b/vendor/github.com/mattermost/mattermost-server/plugin/helpers.go deleted file mode 100644 index ed230cd..0000000 --- a/vendor/github.com/mattermost/mattermost-server/plugin/helpers.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package plugin - -import "github.com/mattermost/mattermost-server/model" - -type Helpers interface { - // EnsureBot either returns an existing bot user matching the given bot, or creates a bot user from the given bot. - // Returns the id of the resulting bot. - EnsureBot(bot *model.Bot) (string, error) - - // KVSetJSON stores a key-value pair, unique per plugin, marshalling the given value as a JSON string. - KVSetJSON(key string, value interface{}) error - - // KVCompareAndSetJSON updates a key-value pair, unique per plugin, but only if the current value matches the given oldValue after marshalling as a JSON string. - // Inserts a new key if oldValue == nil. - // Returns (false, err) if DB error occurred - // Returns (false, nil) if current value != oldValue or key already exists when inserting - // Returns (true, nil) if current value == oldValue or new key is inserted - // - // Minimum server version: 5.12 - KVCompareAndSetJSON(key string, oldValue interface{}, newValue interface{}) (bool, error) - - // KVGetJSON retrieves a value based on the key, unique per plugin, unmarshalling the previously set JSON string into the given value. Returns true if the key exists. - KVGetJSON(key string, value interface{}) (bool, error) - - // KVSetWithExpiryJSON stores a key-value pair with an expiry time, unique per plugin, marshalling the given value as a JSON string. 
- // - // Minimum server version: 5.6 - KVSetWithExpiryJSON(key string, value interface{}, expireInSeconds int64) error -} - -type HelpersImpl struct { - API API -} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/helpers_bots.go b/vendor/github.com/mattermost/mattermost-server/plugin/helpers_bots.go deleted file mode 100644 index 28ed4df..0000000 --- a/vendor/github.com/mattermost/mattermost-server/plugin/helpers_bots.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. - -package plugin - -import ( - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils" - "github.com/pkg/errors" -) - -func (p *HelpersImpl) EnsureBot(bot *model.Bot) (retBotId string, retErr error) { - // Must provide a bot with a username - if bot == nil || len(bot.Username) < 1 { - return "", errors.New("passed a bad bot, nil or no username") - } - - // If we fail for any reason, this could be a race between creation of bot and - // retrieval from another EnsureBot. Just try the basic retrieve existing again. - defer func() { - if retBotId == "" || retErr != nil { - var err error - var botIdBytes []byte - - err = utils.ProgressiveRetry(func() error { - botIdBytes, err = p.API.KVGet(BOT_USER_KEY) - if err != nil { - return err - } - return nil - }) - - if err == nil && botIdBytes != nil { - retBotId = string(botIdBytes) - retErr = nil - } - } - }() - - botIdBytes, kvGetErr := p.API.KVGet(BOT_USER_KEY) - if kvGetErr != nil { - return "", errors.Wrap(kvGetErr, "failed to get bot") - } - - // If the bot has already been created, there is nothing to do. - if botIdBytes != nil { - botId := string(botIdBytes) - return botId, nil - } - - // Check for an existing bot user with that username. If one exists, then use that. - if user, userGetErr := p.API.GetUserByUsername(bot.Username); userGetErr == nil && user != nil { - if user.IsBot { - if kvSetErr := p.API.KVSet(BOT_USER_KEY, []byte(user.Id)); kvSetErr != nil { - p.API.LogWarn("Failed to set claimed bot user id.", "userid", user.Id, "err", kvSetErr) - } - } else { - p.API.LogError("Plugin attempted to use an account that already exists. Convert user to a bot account in the CLI by running 'mattermost user convert --bot'. If the user is an existing user account you want to preserve, change its username and restart the Mattermost server, after which the plugin will create a bot account with that name. For more information about bot accounts, see https://mattermost.com/pl/default-bot-accounts", "username", bot.Username, "user_id", user.Id) - } - return user.Id, nil - } - - // Create a new bot user for the plugin - createdBot, createBotErr := p.API.CreateBot(bot) - if createBotErr != nil { - return "", errors.Wrap(createBotErr, "failed to create bot") - } - - if kvSetErr := p.API.KVSet(BOT_USER_KEY, []byte(createdBot.UserId)); kvSetErr != nil { - p.API.LogWarn("Failed to set created bot user id.", "userid", createdBot.UserId, "err", kvSetErr) - } - - return createdBot.UserId, nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/helpers_kv.go b/vendor/github.com/mattermost/mattermost-server/plugin/helpers_kv.go deleted file mode 100644 index c5a3219..0000000 --- a/vendor/github.com/mattermost/mattermost-server/plugin/helpers_kv.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See LICENSE.txt for license information. 
- -package plugin - -import ( - "encoding/json" - - "github.com/pkg/errors" -) - -// KVGetJSON is a wrapper around KVGet to simplify reading a JSON object from the key value store. -func (p *HelpersImpl) KVGetJSON(key string, value interface{}) (bool, error) { - data, appErr := p.API.KVGet(key) - if appErr != nil { - return false, appErr - } - if data == nil { - return false, nil - } - - err := json.Unmarshal(data, value) - if err != nil { - return false, err - } - - return true, nil -} - -// KVSetJSON is a wrapper around KVSet to simplify writing a JSON object to the key value store. -func (p *HelpersImpl) KVSetJSON(key string, value interface{}) error { - data, err := json.Marshal(value) - if err != nil { - return err - } - - appErr := p.API.KVSet(key, data) - if appErr != nil { - return appErr - } - - return nil -} - -// KVCompareAndSetJSON is a wrapper around KVCompareAndSet to simplify atomically writing a JSON object to the key value store. -func (p *HelpersImpl) KVCompareAndSetJSON(key string, oldValue interface{}, newValue interface{}) (bool, error) { - var oldData, newData []byte - var err error - - if oldValue != nil { - oldData, err = json.Marshal(oldValue) - if err != nil { - return false, errors.Wrap(err, "unable to marshal old value") - } - } - - if newValue != nil { - newData, err = json.Marshal(newValue) - if err != nil { - return false, errors.Wrap(err, "unable to marshal new value") - } - } - - set, appErr := p.API.KVCompareAndSet(key, oldData, newData) - if appErr != nil { - return set, appErr - } - - return set, nil -} - -// KVSetWithExpiryJSON is a wrapper around KVSetWithExpiry to simplify atomically writing a JSON object with expiry to the key value store. -func (p *HelpersImpl) KVSetWithExpiryJSON(key string, value interface{}, expireInSeconds int64) error { - data, err := json.Marshal(value) - if err != nil { - return err - } - - appErr := p.API.KVSetWithExpiry(key, data, expireInSeconds) - if appErr != nil { - return appErr - } - - return nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/helpers.go b/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/helpers.go deleted file mode 100644 index bf474c8..0000000 --- a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/helpers.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -// Regenerate this file using `make plugin-mocks`. 
- -package plugintest - -import mock "github.com/stretchr/testify/mock" -import model "github.com/mattermost/mattermost-server/model" - -// Helpers is an autogenerated mock type for the Helpers type -type Helpers struct { - mock.Mock -} - -// EnsureBot provides a mock function with given fields: bot -func (_m *Helpers) EnsureBot(bot *model.Bot) (string, error) { - ret := _m.Called(bot) - - var r0 string - if rf, ok := ret.Get(0).(func(*model.Bot) string); ok { - r0 = rf(bot) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.Bot) error); ok { - r1 = rf(bot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// KVCompareAndSetJSON provides a mock function with given fields: key, oldValue, newValue -func (_m *Helpers) KVCompareAndSetJSON(key string, oldValue interface{}, newValue interface{}) (bool, error) { - ret := _m.Called(key, oldValue, newValue) - - var r0 bool - if rf, ok := ret.Get(0).(func(string, interface{}, interface{}) bool); ok { - r0 = rf(key, oldValue, newValue) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, interface{}, interface{}) error); ok { - r1 = rf(key, oldValue, newValue) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// KVGetJSON provides a mock function with given fields: key, value -func (_m *Helpers) KVGetJSON(key string, value interface{}) (bool, error) { - ret := _m.Called(key, value) - - var r0 bool - if rf, ok := ret.Get(0).(func(string, interface{}) bool); ok { - r0 = rf(key, value) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, interface{}) error); ok { - r1 = rf(key, value) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// KVSetJSON provides a mock function with given fields: key, value -func (_m *Helpers) KVSetJSON(key string, value interface{}) error { - ret := _m.Called(key, value) - - var r0 error - if rf, ok := ret.Get(0).(func(string, interface{}) error); ok { - r0 = rf(key, value) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// KVSetWithExpiryJSON provides a mock function with given fields: key, value, expireInSeconds -func (_m *Helpers) KVSetWithExpiryJSON(key string, value interface{}, expireInSeconds int64) error { - ret := _m.Called(key, value, expireInSeconds) - - var r0 error - if rf, ok := ret.Get(0).(func(string, interface{}, int64) error); ok { - r0 = rf(key, value, expireInSeconds) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/vendor/github.com/mattermost/mattermost-server/scripts/license-check.sh b/vendor/github.com/mattermost/mattermost-server/scripts/license-check.sh deleted file mode 100755 index 7c0e063..0000000 --- a/vendor/github.com/mattermost/mattermost-server/scripts/license-check.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -set -e -IFS=$'\n' -count=0 -for fileType in GoFiles; do - for file in `go list -f $'{{range .GoFiles}}{{$.Dir}}/{{.}}\n{{end}}' "$@"`; do - case $file in - */utils/lru.go|*/utils/imgutils/gif.go|*/store/storetest/mocks/*|*/services/*/mocks/*|*/app/plugin/jira/plugin_*|*/plugin/plugintest/*|*/app/plugin/zoom/plugin_*|*/einterfaces/mocks/*) - # Third-party, doesn't require a header. - ;; - *) - if ! grep 'Mattermost, Inc. All Rights Reserved.' $file -q; then - >&2 echo "FAIL: $file is missing a license header." - ((count++)) - fi - esac - done -done -if [ $count -eq 0 ]; then - exit 0 -fi - -if [ $count -gt 1 ]; then - >&2 echo "$count files are missing license headers." 
-else - >&2 echo "$count file is missing a license header." -fi -exit 1 diff --git a/vendor/github.com/mattermost/mattermost-server/utils/api.go b/vendor/github.com/mattermost/mattermost-server/utils/api.go deleted file mode 100644 index 6e513b3..0000000 --- a/vendor/github.com/mattermost/mattermost-server/utils/api.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package utils - -import ( - "crypto" - "crypto/rand" - "encoding/base64" - "fmt" - "html/template" - "net/http" - "net/url" - "path" - "strings" - - "github.com/mattermost/mattermost-server/model" -) - -func CheckOrigin(r *http.Request, allowedOrigins string) bool { - origin := r.Header.Get("Origin") - if origin == "" { - return true - } - - if allowedOrigins == "*" { - return true - } - for _, allowed := range strings.Split(allowedOrigins, " ") { - if allowed == origin { - return true - } - } - return false -} - -func OriginChecker(allowedOrigins string) func(*http.Request) bool { - return func(r *http.Request) bool { - return CheckOrigin(r, allowedOrigins) - } -} - -func RenderWebAppError(config *model.Config, w http.ResponseWriter, r *http.Request, err *model.AppError, s crypto.Signer) { - RenderWebError(config, w, r, err.StatusCode, url.Values{ - "message": []string{err.Message}, - }, s) -} - -func RenderWebError(config *model.Config, w http.ResponseWriter, r *http.Request, status int, params url.Values, s crypto.Signer) { - queryString := params.Encode() - - subpath, _ := GetSubpathFromConfig(config) - - h := crypto.SHA256 - sum := h.New() - sum.Write([]byte(path.Join(subpath, "error") + "?" + queryString)) - signature, err := s.Sign(rand.Reader, sum.Sum(nil), h) - if err != nil { - http.Error(w, "", http.StatusInternalServerError) - return - } - destination := path.Join(subpath, "error") + "?" + queryString + "&s=" + base64.URLEncoding.EncodeToString(signature) - - if status >= 300 && status < 400 { - http.Redirect(w, r, destination, status) - return - } - - w.Header().Set("Content-Type", "text/html") - w.WriteHeader(status) - fmt.Fprintln(w, ``) - fmt.Fprintln(w, ``) - fmt.Fprintln(w, ``) - fmt.Fprintln(w, `...`) - fmt.Fprintln(w, ``) -} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/extract.go b/vendor/github.com/mattermost/mattermost-server/utils/extract.go deleted file mode 100644 index 12f21da..0000000 --- a/vendor/github.com/mattermost/mattermost-server/utils/extract.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package utils - -import ( - "archive/tar" - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" -) - -// ExtractTarGz takes in an io.Reader containing the bytes for a .tar.gz file and -// a destination string to extract to. 
-func ExtractTarGz(gzipStream io.Reader, dst string) error { - uncompressedStream, err := gzip.NewReader(gzipStream) - if err != nil { - return fmt.Errorf("ExtractTarGz: NewReader failed: %s", err.Error()) - } - defer uncompressedStream.Close() - - tarReader := tar.NewReader(uncompressedStream) - - for { - header, err := tarReader.Next() - - if err == io.EOF { - break - } - - if err != nil { - return fmt.Errorf("ExtractTarGz: Next() failed: %s", err.Error()) - } - - switch header.Typeflag { - case tar.TypeDir: - if PathTraversesUpward(header.Name) { - return fmt.Errorf("ExtractTarGz: path attempts to traverse upwards") - } - - path := filepath.Join(dst, header.Name) - if err := os.Mkdir(path, 0744); err != nil && !os.IsExist(err) { - return fmt.Errorf("ExtractTarGz: Mkdir() failed: %s", err.Error()) - } - case tar.TypeReg: - if PathTraversesUpward(header.Name) { - return fmt.Errorf("ExtractTarGz: path attempts to traverse upwards") - } - - path := filepath.Join(dst, header.Name) - dir := filepath.Dir(path) - - if err := os.MkdirAll(dir, 0744); err != nil { - return fmt.Errorf("ExtractTarGz: MkdirAll() failed: %s", err.Error()) - } - - outFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(header.Mode)) - if err != nil { - return fmt.Errorf("ExtractTarGz: Create() failed: %s", err.Error()) - } - defer outFile.Close() - if _, err := io.Copy(outFile, tarReader); err != nil { - return fmt.Errorf("ExtractTarGz: Copy() failed: %s", err.Error()) - } - default: - return fmt.Errorf( - "ExtractTarGz: unknown type: %v in %v", - header.Typeflag, - header.Name) - } - } - - return nil -} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/lru.go b/vendor/github.com/mattermost/mattermost-server/utils/lru.go deleted file mode 100644 index 5a51649..0000000 --- a/vendor/github.com/mattermost/mattermost-server/utils/lru.go +++ /dev/null @@ -1,204 +0,0 @@ -// This files was copied/modified from https://github.com/hashicorp/golang-lru -// which was (see below) - -// This package provides a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru - -package utils - -import ( - "container/list" - "sync" - "time" -) - -// Cache is a thread-safe fixed size LRU cache. -type Cache struct { - size int - evictList *list.List - items map[interface{}]*list.Element - lock sync.RWMutex - name string - defaultExpiry int64 - invalidateClusterEvent string - currentGeneration int64 - len int -} - -// entry is used to hold a value in the evictList. -type entry struct { - key interface{} - value interface{} - expires time.Time - generation int64 -} - -// New creates an LRU of the given size. -func NewLru(size int) *Cache { - return &Cache{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element, size), - } -} - -// New creates an LRU with the given parameters. -func NewLruWithParams(size int, name string, defaultExpiry int64, invalidateClusterEvent string) *Cache { - lru := NewLru(size) - lru.name = name - lru.defaultExpiry = defaultExpiry - lru.invalidateClusterEvent = invalidateClusterEvent - return lru -} - -// Purge is used to completely clear the cache. -func (c *Cache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - - c.len = 0 - c.currentGeneration++ -} - -// Add adds the given key and value to the store without an expiry. 
-func (c *Cache) Add(key, value interface{}) { - c.AddWithExpiresInSecs(key, value, 0) -} - -// Add adds the given key and value to the store with the default expiry. -func (c *Cache) AddWithDefaultExpires(key, value interface{}) { - c.AddWithExpiresInSecs(key, value, c.defaultExpiry) -} - -// AddWithExpiresInSecs adds the given key and value to the cache with the given expiry. -func (c *Cache) AddWithExpiresInSecs(key, value interface{}, expireAtSecs int64) { - c.lock.Lock() - defer c.lock.Unlock() - - c.add(key, value, time.Duration(expireAtSecs)*time.Second) -} - -func (c *Cache) add(key, value interface{}, ttl time.Duration) { - var expires time.Time - if ttl > 0 { - expires = time.Now().Add(ttl) - } - - // Check for existing item, ignoring expiry since we'd update anyway. - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - e := ent.Value.(*entry) - e.value = value - e.expires = expires - if e.generation != c.currentGeneration { - e.generation = c.currentGeneration - c.len++ - } - return - } - - // Add new item - ent := &entry{key, value, expires, c.currentGeneration} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - c.len++ - - if c.evictList.Len() > c.size { - c.removeElement(c.evictList.Back()) - } -} - -// Get returns the value stored in the cache for a key, or nil if no value is present. The ok result indicates whether value was found in the cache. -func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - defer c.lock.Unlock() - - return c.getValue(key) -} - -func (c *Cache) getValue(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - e := ent.Value.(*entry) - - if e.generation != c.currentGeneration || (!e.expires.IsZero() && time.Now().After(e.expires)) { - c.removeElement(ent) - return nil, false - } - - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - - return nil, false -} - -// GetOrAdd returns the existing value for the key if present. Otherwise, it stores and returns the given value. The loaded result is true if the value was loaded, false if stored. -// This API intentionally deviates from the Add-only variants above for simplicity. We should simplify the entire API in the future. -func (c *Cache) GetOrAdd(key, value interface{}, ttl time.Duration) (actual interface{}, loaded bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check for existing item - if actualValue, ok := c.getValue(key); ok { - return actualValue, true - } - - c.add(key, value, ttl) - - return value, false -} - -// Remove deletes the value for a key. -func (c *Cache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - } -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - - keys := make([]interface{}, c.len) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - e := ent.Value.(*entry) - if e.generation == c.currentGeneration { - keys[i] = e.key - i++ - } - } - - return keys -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.len -} - -// Name identifies this cache instance among others in the system. -func (c *Cache) Name() string { - return c.name -} - -// GetInvalidateClusterEvent returns the cluster event configured when this cache was created. 
-func (c *Cache) GetInvalidateClusterEvent() string { - return c.invalidateClusterEvent -} - -func (c *Cache) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - if kv.generation == c.currentGeneration { - c.len-- - } - delete(c.items, kv.key) -} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/path.go b/vendor/github.com/mattermost/mattermost-server/utils/path.go deleted file mode 100644 index 5f921b4..0000000 --- a/vendor/github.com/mattermost/mattermost-server/utils/path.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package utils - -import ( - "path/filepath" - "strings" -) - -// PathTraversesUpward will return true if the path attempts to traverse upwards by using -// ".." in the path. -func PathTraversesUpward(path string) bool { - return strings.HasPrefix(filepath.Clean(path), "..") -} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/test_files_compiler.go b/vendor/github.com/mattermost/mattermost-server/utils/test_files_compiler.go deleted file mode 100644 index ccfcc50..0000000 --- a/vendor/github.com/mattermost/mattermost-server/utils/test_files_compiler.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package utils - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func goMod(t *testing.T, dir string, args ...string) { - cmd := exec.Command("go", append([]string{"mod"}, args...)...) - cmd.Dir = dir - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Failed to %s: %s", strings.Join(args, " "), string(output)) - } -} - -func CompileGo(t *testing.T, sourceCode, outputPath string) { - dir, err := ioutil.TempDir(".", "") - require.NoError(t, err) - defer os.RemoveAll(dir) - - dir, err = filepath.Abs(dir) - require.NoError(t, err) - - // Write out main.go given the source code. - main := filepath.Join(dir, "main.go") - err = ioutil.WriteFile(main, []byte(sourceCode), 0600) - require.NoError(t, err) - - if os.Getenv("GO111MODULE") != "off" { - var mattermostServerPath string - - // Generate a go.mod file relying on the local copy of the mattermost-server. - // testlib linked mattermost-server into the general temporary directory for this test. - mattermostServerPath, err = filepath.Abs("mattermost-server") - require.NoError(t, err) - - goMod(t, dir, "init", "mattermost.com/test") - goMod(t, dir, "edit", "-require", "github.com/mattermost/mattermost-server@v0.0.0") - goMod(t, dir, "edit", "-replace", fmt.Sprintf("github.com/mattermost/mattermost-server@v0.0.0=%s", mattermostServerPath)) - goMod(t, dir, "edit", "-replace", fmt.Sprintf("git.apache.org/thrift.git=%s", "github.com/apache/thrift@v0.0.0-20180902110319-2566ecd5d999")) - } - - cmd := exec.Command("go", "build", "-o", outputPath, "main.go") - cmd.Dir = dir - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - require.NoError(t, err, "failed to compile go") -} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/utils.go b/vendor/github.com/mattermost/mattermost-server/utils/utils.go deleted file mode 100644 index 3af6ebe..0000000 --- a/vendor/github.com/mattermost/mattermost-server/utils/utils.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. 
-// See License.txt for license information. - -package utils - -import ( - "net" - "net/http" - "net/url" - "strings" -) - -func StringInSlice(a string, slice []string) bool { - for _, b := range slice { - if b == a { - return true - } - } - return false -} - -func StringArrayIntersection(arr1, arr2 []string) []string { - arrMap := map[string]bool{} - result := []string{} - - for _, value := range arr1 { - arrMap[value] = true - } - - for _, value := range arr2 { - if arrMap[value] { - result = append(result, value) - } - } - - return result -} - -func RemoveDuplicatesFromStringArray(arr []string) []string { - result := make([]string, 0, len(arr)) - seen := make(map[string]bool) - - for _, item := range arr { - if !seen[item] { - result = append(result, item) - seen[item] = true - } - } - - return result -} - -func StringSliceDiff(a, b []string) []string { - m := make(map[string]bool) - result := []string{} - - for _, item := range b { - m[item] = true - } - - for _, item := range a { - if !m[item] { - result = append(result, item) - } - } - return result -} - -func GetIpAddress(r *http.Request, trustedProxyIPHeader []string) string { - address := "" - - for _, proxyHeader := range trustedProxyIPHeader { - header := r.Header.Get(proxyHeader) - if len(header) > 0 { - addresses := strings.Fields(header) - if len(addresses) > 0 { - address = strings.TrimRight(addresses[0], ",") - } - } - - if len(address) > 0 { - return address - } - } - - if len(address) == 0 { - address, _, _ = net.SplitHostPort(r.RemoteAddr) - } - - return address -} - -func GetHostnameFromSiteURL(siteURL string) string { - u, err := url.Parse(siteURL) - if err != nil { - return "" - } - - return u.Hostname() -} diff --git a/vendor/github.com/mattermost/mattermost-server/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/LICENSE.txt rename to vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt index b40b5e5..8382687 100644 --- a/vendor/github.com/mattermost/mattermost-server/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt @@ -11,7 +11,7 @@ You may be licensed to use source code to create compiled versions not produced 1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or 2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com -You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, model/, +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, i18n/, model/, plugin/ and all subdirectories thereof) under the Apache License v2.0. We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not diff --git a/vendor/github.com/mattermost/mattermost-server/NOTICE.txt b/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt similarity index 85% rename from vendor/github.com/mattermost/mattermost-server/NOTICE.txt rename to vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt index 0316f70..0d3d8dd 100644 --- a/vendor/github.com/mattermost/mattermost-server/NOTICE.txt +++ b/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt @@ -864,7 +864,7 @@ Mozilla Public License Version 2.0 means any form of the work other than Source Code Form. 1.7. 
"Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" @@ -1387,7 +1387,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This product contains 'durafmt' by Wesley Hill. -:clock8: Better time duration formatting in Go! +:clock8: Better time duration formatting in Go! * HOMEPAGE: * https://github.com/hako/durafmt @@ -3657,7 +3657,7 @@ lumberjack is a log rolling package for Go The MIT License (MIT) -Copyright (c) 2014 Nate Finch +Copyright (c) 2014 Nate Finch Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -4132,3 +4132,713 @@ A caching, resizing image proxy written in Go of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + +--- + +## oov/psd + +This product contains 'psd' by oov. + +A PSD/PSB file reader for go + +* HOMEPAGE: + * https://github.com/oov/psd + +* LICENSE: MIT + +MIT License + +Copyright (c) 2016 oov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## gopherjs + +This product contains 'gopherjs' by Richard Musiol. + +A Go code to javascript code compiler. + +* HOMEPAGE: + * https://github.com/gopherjs/gopherjs + +* LICENSE: + +Copyright (c) 2013 Richard Musiol. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +## AWS SDK for Go + +This product contains 'aws-sdk' by Amazon. + +AWS-SDK support for the Go language. + +* HOMEPAGE: + * https://github.com/aws/aws-sdk-go + +* LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +--- + +## semver + +This product contains 'semver' by Masterminds. + +The semver package provides the ability to work with Semantic Versions in Go. + +* HOMEPAGE: + * https://github.com/Masterminds/semver + +* LICENSE: + +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +--- + +## Date Constraints + +This product contains 'dateconstraints' by Eli Yukelzon. + +Go library to validate a date against constraints + +* HOMEPAGE: + * https://github.com/reflog/dateconstraints + +* LICENSE: + +MIT License + +Copyright (c) 2020 Eli Yukelzon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## Archiver + +This product contains 'archiver' by Matthew Holt + +A library to handle direferen archive files (zip, rar, tar.gz...) + +* HOMEPAGE: + * https://github.com/mholt/archiver + +* LICENSE: + +MIT License + +Copyright (c) 2016 Matthew Holt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--- + +## PDF Reader library + +This product contains 'pdf' by the Go team and modified by Thuc Le + +A library to provide pdf reading support + +* HOMEPAGE: + * https://github.com/ledongthuc/pdf + +* LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +--- + +## GoOse + +This product contains 'GoOse' by Antonio Linari + +A library to provide html text extraction support + +* HOMEPAGE: + * https://github.com/advancedlogic/GoOse + +* LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--- + +## Docconv + +This product contains 'docconv' by Sajari Pty Ltd + +A library to provide text extraction support for different documents + +* HOMEPAGE: + * https://github.com/sajari/docconv + +* LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Sajari Pty Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +--- + +## JWT-Go + +This product contains `jwt-go` by Dave Grijalva + +* HOMEPAGE: + * https://github.com/dgrijalva/jwt-go + +* LICENSE: + +The MIT License (MIT) + +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of +the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/account_migration.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/account_migration.go new file mode 100644 index 0000000..d16412c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/account_migration.go @@ -0,0 +1,13 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type AccountMigrationInterface interface { + MigrateToLdap(fromAuthService string, forignUserFieldNameToMatch string, force bool, dryRun bool) *model.AppError + MigrateToSaml(fromAuthService string, usersMap map[string]string, auto bool, dryRun bool) *model.AppError +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cloud.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cloud.go new file mode 100644 index 0000000..6c56cac --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cloud.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type CloudInterface interface { + GetCloudProducts() ([]*model.Product, *model.AppError) + + CreateCustomerPayment() (*model.StripeSetupIntent, *model.AppError) + ConfirmCustomerPayment(*model.ConfirmPaymentMethodRequest) *model.AppError + + GetCloudCustomer() (*model.CloudCustomer, *model.AppError) + UpdateCloudCustomer(customerInfo *model.CloudCustomerInfo) (*model.CloudCustomer, *model.AppError) + UpdateCloudCustomerAddress(address *model.Address) (*model.CloudCustomer, *model.AppError) + + GetSubscription() (*model.Subscription, *model.AppError) + GetInvoicesForSubscription() ([]*model.Invoice, *model.AppError) + GetInvoicePDF(invoiceID string) ([]byte, string, *model.AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cluster.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cluster.go new file mode 100644 index 0000000..e2b4bb1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/cluster.go @@ -0,0 +1,30 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type ClusterMessageHandler func(msg *model.ClusterMessage) + +type ClusterInterface interface { + StartInterNodeCommunication() + StopInterNodeCommunication() + RegisterClusterMessageHandler(event string, crm ClusterMessageHandler) + GetClusterId() string + IsLeader() bool + // HealthScore returns a number which is indicative of how well an instance is meeting + // the soft real-time requirements of the protocol. Lower numbers are better, + // and zero means "totally healthy". + HealthScore() int + GetMyClusterInfo() *model.ClusterInfo + GetClusterInfos() []*model.ClusterInfo + SendClusterMessage(cluster *model.ClusterMessage) + NotifyMsg(buf []byte) + GetClusterStats() ([]*model.ClusterStats, *model.AppError) + GetLogs(page, perPage int) ([]string, *model.AppError) + GetPluginStatuses() (model.PluginStatuses, *model.AppError) + ConfigChanged(previousConfig *model.Config, newConfig *model.Config, sendToOtherServer bool) *model.AppError +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/compliance.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/compliance.go new file mode 100644 index 0000000..8ff660e --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/compliance.go @@ -0,0 +1,13 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type ComplianceInterface interface { + StartComplianceDailyJob() + RunComplianceJob(job *model.Compliance) *model.AppError +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/data_retention.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/data_retention.go new file mode 100644 index 0000000..99a1dd2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/data_retention.go @@ -0,0 +1,12 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type DataRetentionInterface interface { + GetPolicy() (*model.DataRetentionPolicy, *model.AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/ldap.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/ldap.go new file mode 100644 index 0000000..fb91792 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/ldap.go @@ -0,0 +1,26 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type LdapInterface interface { + DoLogin(id string, password string) (*model.User, *model.AppError) + GetUser(id string) (*model.User, *model.AppError) + GetUserAttributes(id string, attributes []string) (map[string]string, *model.AppError) + CheckPassword(id string, password string) *model.AppError + CheckPasswordAuthData(authData string, password string) *model.AppError + SwitchToLdap(userId, ldapId, ldapPassword string) *model.AppError + StartSynchronizeJob(waitForJobToFinish bool) (*model.Job, *model.AppError) + RunTest() *model.AppError + GetAllLdapUsers() ([]*model.User, *model.AppError) + MigrateIDAttribute(toAttribute string) error + GetGroup(groupUID string) (*model.Group, *model.AppError) + GetAllGroupsPage(page int, perPage int, opts model.LdapGroupSearchOpts) ([]*model.Group, int, *model.AppError) + FirstLoginSync(user *model.User, userAuthService, userAuthData, email string) *model.AppError + UpdateProfilePictureIfNecessary(model.User, model.Session) + GetADLdapIdFromSAMLId(authData string) string +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/message_export.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/message_export.go new file mode 100644 index 0000000..02a1ffd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/message_export.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "context" + + "github.com/mattermost/mattermost-server/v5/model" +) + +type MessageExportInterface interface { + StartSynchronizeJob(ctx context.Context, exportFromTimestamp int64) (*model.Job, *model.AppError) + RunExport(format string, since int64) (int64, *model.AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/metrics.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/metrics.go new file mode 100644 index 0000000..a2ae546 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/metrics.go @@ -0,0 +1,69 @@ +// Copyright (c) 2015-present Mattermost, Inc. 
All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/logr" +) + +type MetricsInterface interface { + StartServer() + StopServer() + + IncrementPostCreate() + IncrementWebhookPost() + IncrementPostSentEmail() + IncrementPostSentPush() + IncrementPostBroadcast() + IncrementPostFileAttachment(count int) + + IncrementHttpRequest() + IncrementHttpError() + + IncrementClusterRequest() + ObserveClusterRequestDuration(elapsed float64) + IncrementClusterEventType(eventType string) + + IncrementLogin() + IncrementLoginFail() + + IncrementEtagHitCounter(route string) + IncrementEtagMissCounter(route string) + + IncrementMemCacheHitCounter(cacheName string) + IncrementMemCacheMissCounter(cacheName string) + IncrementMemCacheInvalidationCounter(cacheName string) + IncrementMemCacheMissCounterSession() + IncrementMemCacheHitCounterSession() + IncrementMemCacheInvalidationCounterSession() + + IncrementWebsocketEvent(eventType string) + IncrementWebSocketBroadcast(eventType string) + IncrementWebSocketBroadcastBufferSize(hub string, amount float64) + DecrementWebSocketBroadcastBufferSize(hub string, amount float64) + IncrementWebSocketBroadcastUsersRegistered(hub string, amount float64) + DecrementWebSocketBroadcastUsersRegistered(hub string, amount float64) + + AddMemCacheHitCounter(cacheName string, amount float64) + AddMemCacheMissCounter(cacheName string, amount float64) + + IncrementPostsSearchCounter() + ObservePostsSearchDuration(elapsed float64) + IncrementFilesSearchCounter() + ObserveFilesSearchDuration(elapsed float64) + ObserveStoreMethodDuration(method, success string, elapsed float64) + ObserveApiEndpointDuration(endpoint, method, statusCode string, elapsed float64) + IncrementPostIndexCounter() + IncrementFileIndexCounter() + IncrementUserIndexCounter() + IncrementChannelIndexCounter() + + ObservePluginHookDuration(pluginID, hookName string, success bool, elapsed float64) + ObservePluginMultiHookIterationDuration(pluginID string, elapsed float64) + ObservePluginMultiHookDuration(elapsed float64) + ObservePluginApiDuration(pluginID, apiName string, success bool, elapsed float64) + + ObserveEnabledUsers(users int64) + GetLoggerMetricsCollector() logr.MetricsCollector +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/mfa.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/mfa.go new file mode 100644 index 0000000..b42aae7 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/mfa.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type MfaInterface interface { + GenerateSecret(user *model.User) (string, []byte, *model.AppError) + Activate(user *model.User, token string) *model.AppError + Deactivate(userId string) *model.AppError + ValidateToken(secret, token string) (bool, *model.AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/notification.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/notification.go new file mode 100644 index 0000000..05a07a3 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/notification.go @@ -0,0 +1,13 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type NotificationInterface interface { + GetNotificationMessage(ack *model.PushNotificationAck, userId string) (*model.PushNotification, *model.AppError) + CheckLicense() *model.AppError +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/oauthproviders.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/oauthproviders.go new file mode 100644 index 0000000..def190c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/oauthproviders.go @@ -0,0 +1,30 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "io" + + "github.com/mattermost/mattermost-server/v5/model" +) + +type OauthProvider interface { + GetUserFromJson(data io.Reader, tokenUser *model.User) (*model.User, error) + GetSSOSettings(config *model.Config, service string) (*model.SSOSettings, error) + GetUserFromIdToken(idToken string) (*model.User, error) +} + +var oauthProviders = make(map[string]OauthProvider) + +func RegisterOauthProvider(name string, newProvider OauthProvider) { + oauthProviders[name] = newProvider +} + +func GetOauthProvider(name string) OauthProvider { + provider, ok := oauthProviders[name] + if ok { + return provider + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/saml.go b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/saml.go new file mode 100644 index 0000000..157ab01 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/einterfaces/saml.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +type SamlInterface interface { + ConfigureSP() error + BuildRequest(relayState string) (*model.SamlAuthRequest, *model.AppError) + DoLogin(encodedXML string, relayState map[string]string) (*model.User, *model.AppError) + GetMetadata() (string, *model.AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go new file mode 100644 index 0000000..1e409b1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go @@ -0,0 +1,95 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/mattermost/logr" +) + +// defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation +// before mlog is fully configured. +func defaultLog(level, msg string, fields ...Field) { + log := struct { + Level string `json:"level"` + Message string `json:"msg"` + Fields []Field `json:"fields,omitempty"` + }{ + level, + msg, + fields, + } + + if b, err := json.Marshal(log); err != nil { + fmt.Fprintf(os.Stderr, `{"level":"error","msg":"failed to encode log message"}%s`, "\n") + } else { + fmt.Fprintf(os.Stderr, "%s\n", b) + } +} + +func defaultDebugLog(msg string, fields ...Field) { + defaultLog("debug", msg, fields...) +} + +func defaultInfoLog(msg string, fields ...Field) { + defaultLog("info", msg, fields...) +} + +func defaultWarnLog(msg string, fields ...Field) { + defaultLog("warn", msg, fields...) 
+} + +func defaultErrorLog(msg string, fields ...Field) { + defaultLog("error", msg, fields...) +} + +func defaultCriticalLog(msg string, fields ...Field) { + // We map critical to error in zap, so be consistent. + defaultLog("error", msg, fields...) +} + +func defaultCustomLog(lvl LogLevel, msg string, fields ...Field) { + // custom log levels are only output once log targets are configured. +} + +func defaultCustomMultiLog(lvl []LogLevel, msg string, fields ...Field) { + // custom log levels are only output once log targets are configured. +} + +func defaultFlush(ctx context.Context) error { + return nil +} + +func defaultAdvancedConfig(cfg LogTargetCfg) error { + // mlog.ConfigAdvancedConfig should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot config advanced logging on default logger") +} + +func defaultAdvancedShutdown(ctx context.Context) error { + return nil +} + +func defaultAddTarget(targets ...logr.Target) error { + // mlog.AddTarget should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot AddTarget on default logger") +} + +func defaultRemoveTargets(ctx context.Context, f func(TargetInfo) bool) error { + // mlog.RemoveTargets should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot RemoveTargets on default logger") +} + +func defaultEnableMetrics(collector logr.MetricsCollector) error { + // mlog.EnableMetrics should not be called until default + // logger is replaced with mlog.Logger instance. + return errors.New("cannot EnableMetrics on default logger") +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go new file mode 100644 index 0000000..93762fd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go @@ -0,0 +1,32 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "github.com/mattermost/logr" +) + +// onLoggerError is called when the logging system encounters an error, +// such as a target not able to write records. The targets will keep trying +// however the error will be logged with a dedicated level that can be output +// to a safe/always available target for monitoring or alerting. +func onLoggerError(err error) { + Log(LvlLogError, "advanced logging error", Err(err)) +} + +// onQueueFull is called when the main logger queue is full, indicating the +// volume and frequency of log record creation is too high for the queue size +// and/or the target latencies. +func onQueueFull(rec *logr.LogRec, maxQueueSize int) bool { + Log(LvlLogError, "main queue full, dropping record", Any("rec", rec)) + return true // drop record +} + +// onTargetQueueFull is called when the main logger queue is full, indicating the +// volume and frequency of log record creation is too high for the target's queue size +// and/or the target latency. 
+func onTargetQueueFull(target logr.Target, rec *logr.LogRec, maxQueueSize int) bool { + Log(LvlLogError, "target queue full, dropping record", String("target", ""), Any("rec", rec)) + return true // drop record +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go new file mode 100644 index 0000000..2986d92 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go @@ -0,0 +1,95 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "log" + "sync/atomic" + + "github.com/mattermost/logr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var globalLogger *Logger + +func InitGlobalLogger(logger *Logger) { + // Clean up previous instance. + if globalLogger != nil && globalLogger.logrLogger != nil { + globalLogger.logrLogger.Logr().Shutdown() + } + glob := *logger + glob.zap = glob.zap.WithOptions(zap.AddCallerSkip(1)) + globalLogger = &glob + Debug = globalLogger.Debug + Info = globalLogger.Info + Warn = globalLogger.Warn + Error = globalLogger.Error + Critical = globalLogger.Critical + Log = globalLogger.Log + LogM = globalLogger.LogM + Flush = globalLogger.Flush + ConfigAdvancedLogging = globalLogger.ConfigAdvancedLogging + ShutdownAdvancedLogging = globalLogger.ShutdownAdvancedLogging + AddTarget = globalLogger.AddTarget + RemoveTargets = globalLogger.RemoveTargets + EnableMetrics = globalLogger.EnableMetrics +} + +// logWriterFunc provides access to mlog via io.Writer, so the standard logger +// can be redirected to use mlog and whatever targets are defined. +type logWriterFunc func([]byte) (int, error) + +func (lw logWriterFunc) Write(p []byte) (int, error) { + return lw(p) +} + +func RedirectStdLog(logger *Logger) { + if atomic.LoadInt32(&disableZap) == 0 { + zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel) + return + } + + writer := func(p []byte) (int, error) { + Log(LvlStdLog, string(p)) + return len(p), nil + } + log.SetOutput(logWriterFunc(writer)) +} + +type LogFunc func(string, ...Field) +type LogFuncCustom func(LogLevel, string, ...Field) +type LogFuncCustomMulti func([]LogLevel, string, ...Field) +type FlushFunc func(context.Context) error +type ConfigFunc func(cfg LogTargetCfg) error +type ShutdownFunc func(context.Context) error +type AddTargetFunc func(...logr.Target) error +type RemoveTargetsFunc func(context.Context, func(TargetInfo) bool) error +type EnableMetricsFunc func(logr.MetricsCollector) error + +// DON'T USE THIS Modify the level on the app logger +func GloballyDisableDebugLogForTest() { + globalLogger.consoleLevel.SetLevel(zapcore.ErrorLevel) +} + +// DON'T USE THIS Modify the level on the app logger +func GloballyEnableDebugLogForTest() { + globalLogger.consoleLevel.SetLevel(zapcore.DebugLevel) +} + +var Debug LogFunc = defaultDebugLog +var Info LogFunc = defaultInfoLog +var Warn LogFunc = defaultWarnLog +var Error LogFunc = defaultErrorLog +var Critical LogFunc = defaultCriticalLog +var Log LogFuncCustom = defaultCustomLog +var LogM LogFuncCustomMulti = defaultCustomMultiLog +var Flush FlushFunc = defaultFlush + +var ConfigAdvancedLogging ConfigFunc = defaultAdvancedConfig +var ShutdownAdvancedLogging ShutdownFunc = defaultAdvancedShutdown +var AddTarget AddTargetFunc = defaultAddTarget +var RemoveTargets RemoveTargetsFunc = defaultRemoveTargets +var 
EnableMetrics EnableMetricsFunc = defaultEnableMetrics diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go new file mode 100644 index 0000000..54bd254 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +// Standard levels +var ( + LvlPanic = LogLevel{ID: 0, Name: "panic", Stacktrace: true} + LvlFatal = LogLevel{ID: 1, Name: "fatal", Stacktrace: true} + LvlError = LogLevel{ID: 2, Name: "error"} + LvlWarn = LogLevel{ID: 3, Name: "warn"} + LvlInfo = LogLevel{ID: 4, Name: "info"} + LvlDebug = LogLevel{ID: 5, Name: "debug"} + LvlTrace = LogLevel{ID: 6, Name: "trace"} + // used by redirected standard logger + LvlStdLog = LogLevel{ID: 10, Name: "stdlog"} + // used only by the logger + LvlLogError = LogLevel{ID: 11, Name: "logerror", Stacktrace: true} +) + +// Register custom (discrete) levels here. +// !!!!! ID's must not exceed 32,768 !!!!!! +var ( + // used by the audit system + LvlAuditAPI = LogLevel{ID: 100, Name: "audit-api"} + LvlAuditContent = LogLevel{ID: 101, Name: "audit-content"} + LvlAuditPerms = LogLevel{ID: 102, Name: "audit-permissions"} + LvlAuditCLI = LogLevel{ID: 103, Name: "audit-cli"} + + // used by the TCP log target + LvlTcpLogTarget = LogLevel{ID: 120, Name: "TcpLogTarget"} + + // add more here ... +) + +// Combinations for LogM (log multi) +var ( + MLvlAuditAll = []LogLevel{LvlAuditAPI, LvlAuditContent, LvlAuditPerms, LvlAuditCLI} +) diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go new file mode 100644 index 0000000..6395240 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go @@ -0,0 +1,342 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "fmt" + "io" + "log" + "os" + "sync" + "sync/atomic" + "time" + + "github.com/mattermost/logr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +const ( + // Very verbose messages for debugging specific issues + LevelDebug = "debug" + // Default log level, informational + LevelInfo = "info" + // Warnings are messages about possible issues + LevelWarn = "warn" + // Errors are messages about things we know are problems + LevelError = "error" + + // DefaultFlushTimeout is the default amount of time mlog.Flush will wait + // before timing out. + DefaultFlushTimeout = time.Second * 5 +) + +var ( + // disableZap is set when Zap should be disabled and Logr used instead. + // This is needed for unit testing as Zap has no shutdown capabilities + // and holds file handles until process exit. Currently unit test create + // many server instances, and thus many Zap log files. + // This flag will be removed when Zap is permanently replaced. 
+ disableZap int32 +) + +// Type and function aliases from zap to limit the libraries scope into MM code +type Field = zapcore.Field + +var Int64 = zap.Int64 +var Int32 = zap.Int32 +var Int = zap.Int +var Uint32 = zap.Uint32 +var String = zap.String +var Any = zap.Any +var Err = zap.Error +var NamedErr = zap.NamedError +var Bool = zap.Bool +var Duration = zap.Duration + +type TargetInfo logr.TargetInfo + +type LoggerConfiguration struct { + EnableConsole bool + ConsoleJson bool + ConsoleLevel string + EnableFile bool + FileJson bool + FileLevel string + FileLocation string +} + +type Logger struct { + zap *zap.Logger + consoleLevel zap.AtomicLevel + fileLevel zap.AtomicLevel + logrLogger *logr.Logger + mutex *sync.RWMutex +} + +func getZapLevel(level string) zapcore.Level { + switch level { + case LevelInfo: + return zapcore.InfoLevel + case LevelWarn: + return zapcore.WarnLevel + case LevelDebug: + return zapcore.DebugLevel + case LevelError: + return zapcore.ErrorLevel + default: + return zapcore.InfoLevel + } +} + +func makeEncoder(json bool) zapcore.Encoder { + encoderConfig := zap.NewProductionEncoderConfig() + if json { + return zapcore.NewJSONEncoder(encoderConfig) + } + + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + return zapcore.NewConsoleEncoder(encoderConfig) +} + +func NewLogger(config *LoggerConfiguration) *Logger { + cores := []zapcore.Core{} + logger := &Logger{ + consoleLevel: zap.NewAtomicLevelAt(getZapLevel(config.ConsoleLevel)), + fileLevel: zap.NewAtomicLevelAt(getZapLevel(config.FileLevel)), + logrLogger: newLogr(), + mutex: &sync.RWMutex{}, + } + + if config.EnableConsole { + writer := zapcore.Lock(os.Stderr) + core := zapcore.NewCore(makeEncoder(config.ConsoleJson), writer, logger.consoleLevel) + cores = append(cores, core) + } + + if config.EnableFile { + if atomic.LoadInt32(&disableZap) != 0 { + t := &LogTarget{ + Type: "file", + Format: "json", + Levels: mlogLevelToLogrLevels(config.FileLevel), + MaxQueueSize: DefaultMaxTargetQueue, + Options: []byte(fmt.Sprintf(`{"Filename":"%s", "MaxSizeMB":%d, "Compress":%t}`, + config.FileLocation, 100, true)), + } + if !config.FileJson { + t.Format = "plain" + } + if tgt, err := NewLogrTarget("mlogFile", t); err == nil { + logger.logrLogger.Logr().AddTarget(tgt) + } else { + Error("error creating mlogFile", Err(err)) + } + } else { + writer := zapcore.AddSync(&lumberjack.Logger{ + Filename: config.FileLocation, + MaxSize: 100, + Compress: true, + }) + + core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel) + cores = append(cores, core) + } + } + + combinedCore := zapcore.NewTee(cores...) + + logger.zap = zap.New(combinedCore, + zap.AddCaller(), + ) + return logger +} + +func (l *Logger) ChangeLevels(config *LoggerConfiguration) { + l.consoleLevel.SetLevel(getZapLevel(config.ConsoleLevel)) + l.fileLevel.SetLevel(getZapLevel(config.FileLevel)) +} + +func (l *Logger) SetConsoleLevel(level string) { + l.consoleLevel.SetLevel(getZapLevel(level)) +} + +func (l *Logger) With(fields ...Field) *Logger { + newLogger := *l + newLogger.zap = newLogger.zap.With(fields...) + if newLogger.getLogger() != nil { + ll := newLogger.getLogger().WithFields(zapToLogr(fields)) + newLogger.logrLogger = &ll + } + return &newLogger +} + +func (l *Logger) StdLog(fields ...Field) *log.Logger { + return zap.NewStdLog(l.With(fields...).zap.WithOptions(getStdLogOption())) +} + +// StdLogAt returns *log.Logger which writes to supplied zap logger at required level. 
+func (l *Logger) StdLogAt(level string, fields ...Field) (*log.Logger, error) { + return zap.NewStdLogAt(l.With(fields...).zap.WithOptions(getStdLogOption()), getZapLevel(level)) +} + +// StdLogWriter returns a writer that can be hooked up to the output of a golang standard logger +// anything written will be interpreted as log entries accordingly +func (l *Logger) StdLogWriter() io.Writer { + newLogger := *l + newLogger.zap = newLogger.zap.WithOptions(zap.AddCallerSkip(4), getStdLogOption()) + f := newLogger.Info + return &loggerWriter{f} +} + +func (l *Logger) WithCallerSkip(skip int) *Logger { + newLogger := *l + newLogger.zap = newLogger.zap.WithOptions(zap.AddCallerSkip(skip)) + return &newLogger +} + +// Made for the plugin interface, wraps mlog in a simpler interface +// at the cost of performance +func (l *Logger) Sugar() *SugarLogger { + return &SugarLogger{ + wrappedLogger: l, + zapSugar: l.zap.Sugar(), + } +} + +func (l *Logger) Debug(message string, fields ...Field) { + l.zap.Debug(message, fields...) + if isLevelEnabled(l.getLogger(), logr.Debug) { + l.getLogger().WithFields(zapToLogr(fields)).Debug(message) + } +} + +func (l *Logger) Info(message string, fields ...Field) { + l.zap.Info(message, fields...) + if isLevelEnabled(l.getLogger(), logr.Info) { + l.getLogger().WithFields(zapToLogr(fields)).Info(message) + } +} + +func (l *Logger) Warn(message string, fields ...Field) { + l.zap.Warn(message, fields...) + if isLevelEnabled(l.getLogger(), logr.Warn) { + l.getLogger().WithFields(zapToLogr(fields)).Warn(message) + } +} + +func (l *Logger) Error(message string, fields ...Field) { + l.zap.Error(message, fields...) + if isLevelEnabled(l.getLogger(), logr.Error) { + l.getLogger().WithFields(zapToLogr(fields)).Error(message) + } +} + +func (l *Logger) Critical(message string, fields ...Field) { + l.zap.Error(message, fields...) + if isLevelEnabled(l.getLogger(), logr.Error) { + l.getLogger().WithFields(zapToLogr(fields)).Error(message) + } +} + +func (l *Logger) Log(level LogLevel, message string, fields ...Field) { + l.getLogger().WithFields(zapToLogr(fields)).Log(logr.Level(level), message) +} + +func (l *Logger) LogM(levels []LogLevel, message string, fields ...Field) { + var logger *logr.Logger + for _, lvl := range levels { + if isLevelEnabled(l.getLogger(), logr.Level(lvl)) { + // don't create logger with fields unless at least one level is active. + if logger == nil { + l := l.getLogger().WithFields(zapToLogr(fields)) + logger = &l + } + logger.Log(logr.Level(lvl), message) + } + } +} + +func (l *Logger) Flush(cxt context.Context) error { + return l.getLogger().Logr().FlushWithTimeout(cxt) +} + +// ShutdownAdvancedLogging stops the logger from accepting new log records and tries to +// flush queues within the context timeout. Once complete all targets are shutdown +// and any resources released. +func (l *Logger) ShutdownAdvancedLogging(cxt context.Context) error { + err := l.getLogger().Logr().ShutdownWithTimeout(cxt) + l.setLogger(newLogr()) + return err +} + +// ConfigAdvancedLoggingConfig (re)configures advanced logging based on the +// specified log targets. This is the easiest way to get the advanced logger +// configured via a config source such as file. 
+func (l *Logger) ConfigAdvancedLogging(targets LogTargetCfg) error { + if err := l.ShutdownAdvancedLogging(context.Background()); err != nil { + Error("error shutting down previous logger", Err(err)) + } + + err := logrAddTargets(l.getLogger(), targets) + return err +} + +// AddTarget adds one or more logr.Target to the advanced logger. This is the preferred method +// to add custom targets or provide configuration that cannot be expressed via a +// config source. +func (l *Logger) AddTarget(targets ...logr.Target) error { + return l.getLogger().Logr().AddTarget(targets...) +} + +// RemoveTargets selectively removes targets that were previously added to this logger instance +// using the passed in filter function. The filter function should return true to remove the target +// and false to keep it. +func (l *Logger) RemoveTargets(ctx context.Context, f func(ti TargetInfo) bool) error { + // Use locally defined TargetInfo type so we don't spread Logr dependencies. + fc := func(tic logr.TargetInfo) bool { + return f(TargetInfo(tic)) + } + return l.getLogger().Logr().RemoveTargets(ctx, fc) +} + +// EnableMetrics enables metrics collection by supplying a MetricsCollector. +// The MetricsCollector provides counters and gauges that are updated by log targets. +func (l *Logger) EnableMetrics(collector logr.MetricsCollector) error { + return l.getLogger().Logr().SetMetricsCollector(collector) +} + +// getLogger is a concurrent safe getter of the logr logger +func (l *Logger) getLogger() *logr.Logger { + defer l.mutex.RUnlock() + l.mutex.RLock() + return l.logrLogger +} + +// setLogger is a concurrent safe setter of the logr logger +func (l *Logger) setLogger(logger *logr.Logger) { + defer l.mutex.Unlock() + l.mutex.Lock() + l.logrLogger = logger +} + +// DisableZap is called to disable Zap, and Logr will be used instead. Any Logger +// instances created after this call will only use Logr. +// +// This is needed for unit testing as Zap has no shutdown capabilities +// and holds file handles until process exit. Currently unit tests create +// many server instances, and thus many Zap log file handles. +// +// This method will be removed when Zap is permanently replaced. +func DisableZap() { + atomic.StoreInt32(&disableZap, 1) +} + +// EnableZap re-enables Zap such that any Logger instances created after this +// call will allow Zap targets. +func EnableZap() { + atomic.StoreInt32(&disableZap, 0) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go new file mode 100644 index 0000000..01b3902 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go @@ -0,0 +1,247 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/hashicorp/go-multierror" + "github.com/mattermost/logr" + logrFmt "github.com/mattermost/logr/format" + "github.com/mattermost/logr/target" + "go.uber.org/zap/zapcore" +) + +const ( + DefaultMaxTargetQueue = 1000 + DefaultSysLogPort = 514 +) + +type LogLevel struct { + ID logr.LevelID + Name string + Stacktrace bool +} + +type LogTarget struct { + Type string // one of "console", "file", "tcp", "syslog", "none". 
+ Format string // one of "json", "plain" + Levels []LogLevel + Options json.RawMessage + MaxQueueSize int +} + +type LogTargetCfg map[string]*LogTarget +type LogrCleanup func() error + +func newLogr() *logr.Logger { + lgr := &logr.Logr{} + lgr.OnExit = func(int) {} + lgr.OnPanic = func(interface{}) {} + lgr.OnLoggerError = onLoggerError + lgr.OnQueueFull = onQueueFull + lgr.OnTargetQueueFull = onTargetQueueFull + + logger := lgr.NewLogger() + return &logger +} + +func logrAddTargets(logger *logr.Logger, targets LogTargetCfg) error { + lgr := logger.Logr() + var errs error + for name, t := range targets { + target, err := NewLogrTarget(name, t) + if err != nil { + errs = multierror.Append(err) + continue + } + if target != nil { + target.SetName(name) + lgr.AddTarget(target) + } + } + return errs +} + +// NewLogrTarget creates a `logr.Target` based on a target config. +// Can be used when parsing custom config files, or when programmatically adding +// built-in targets. Use `mlog.AddTarget` to add custom targets. +func NewLogrTarget(name string, t *LogTarget) (logr.Target, error) { + formatter, err := newFormatter(name, t.Format) + if err != nil { + return nil, err + } + filter, err := newFilter(name, t.Levels) + if err != nil { + return nil, err + } + + if t.MaxQueueSize == 0 { + t.MaxQueueSize = DefaultMaxTargetQueue + } + + switch t.Type { + case "console": + return newConsoleTarget(name, t, filter, formatter) + case "file": + return newFileTarget(name, t, filter, formatter) + case "syslog": + return newSyslogTarget(name, t, filter, formatter) + case "tcp": + return newTCPTarget(name, t, filter, formatter) + case "none": + return nil, nil + } + return nil, fmt.Errorf("invalid type '%s' for target %s", t.Type, name) +} + +func newFilter(name string, levels []LogLevel) (logr.Filter, error) { + filter := &logr.CustomFilter{} + for _, lvl := range levels { + filter.Add(logr.Level(lvl)) + } + return filter, nil +} + +func newFormatter(name string, format string) (logr.Formatter, error) { + switch format { + case "json", "": + return &logrFmt.JSON{}, nil + case "plain": + return &logrFmt.Plain{Delim: " | "}, nil + default: + return nil, fmt.Errorf("invalid format '%s' for target %s", format, name) + } +} + +func newConsoleTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + type consoleOptions struct { + Out string `json:"Out"` + } + options := &consoleOptions{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + + var w io.Writer + switch options.Out { + case "stdout", "": + w = os.Stdout + case "stderr": + w = os.Stderr + default: + return nil, fmt.Errorf("invalid out '%s' for target %s", options.Out, name) + } + + newTarget := target.NewWriterTarget(filter, formatter, w, t.MaxQueueSize) + return newTarget, nil +} + +func newFileTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + type fileOptions struct { + Filename string `json:"Filename"` + MaxSize int `json:"MaxSizeMB"` + MaxAge int `json:"MaxAgeDays"` + MaxBackups int `json:"MaxBackups"` + Compress bool `json:"Compress"` + } + options := &fileOptions{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + return newFileTargetWithOpts(name, t, target.FileOptions(*options), filter, formatter) +} + +func newFileTargetWithOpts(name string, t *LogTarget, opts target.FileOptions, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + if opts.Filename == "" { + 
return nil, fmt.Errorf("missing 'Filename' option for target %s", name) + } + if err := checkFileWritable(opts.Filename); err != nil { + return nil, fmt.Errorf("error writing to 'Filename' for target %s: %w", name, err) + } + + newTarget := target.NewFileTarget(filter, formatter, opts, t.MaxQueueSize) + return newTarget, nil +} + +func newSyslogTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + options := &SyslogParams{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + + if options.IP == "" { + return nil, fmt.Errorf("missing 'IP' option for target %s", name) + } + if options.Port == 0 { + options.Port = DefaultSysLogPort + } + return NewSyslogTarget(filter, formatter, options, t.MaxQueueSize) +} + +func newTCPTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) { + options := &TcpParams{} + if err := json.Unmarshal(t.Options, options); err != nil { + return nil, err + } + + if options.IP == "" { + return nil, fmt.Errorf("missing 'IP' option for target %s", name) + } + if options.Port == 0 { + return nil, fmt.Errorf("missing 'Port' option for target %s", name) + } + return NewTcpTarget(filter, formatter, options, t.MaxQueueSize) +} + +func checkFileWritable(filename string) error { + // try opening/creating the file for writing + file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) + if err != nil { + return err + } + file.Close() + return nil +} + +func isLevelEnabled(logger *logr.Logger, level logr.Level) bool { + if logger == nil || logger.Logr() == nil { + return false + } + + status := logger.Logr().IsLevelEnabled(level) + return status.Enabled +} + +// zapToLogr converts Zap fields to Logr fields. +// This will not be needed once Logr is used for all logging. +func zapToLogr(zapFields []Field) logr.Fields { + encoder := zapcore.NewMapObjectEncoder() + for _, zapField := range zapFields { + zapField.AddTo(encoder) + } + return logr.Fields(encoder.Fields) +} + +// mlogLevelToLogrLevel converts a mlog logger level to +// an array of discrete Logr levels. +func mlogLevelToLogrLevels(level string) []LogLevel { + levels := make([]LogLevel, 0) + levels = append(levels, LvlError, LvlPanic, LvlFatal, LvlStdLog) + + switch level { + case LevelDebug: + levels = append(levels, LvlDebug) + fallthrough + case LevelInfo: + levels = append(levels, LvlInfo) + fallthrough + case LevelWarn: + levels = append(levels, LvlWarn) + } + return levels +} diff --git a/vendor/github.com/mattermost/mattermost-server/mlog/stdlog.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/stdlog.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/mlog/stdlog.go rename to vendor/github.com/mattermost/mattermost-server/v5/mlog/stdlog.go index 7839ddf..fd702ab 100644 --- a/vendor/github.com/mattermost/mattermost-server/mlog/stdlog.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/stdlog.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package mlog @@ -81,7 +81,7 @@ type loggerWriter struct { func (l *loggerWriter) Write(p []byte) (int, error) { trimmed := string(bytes.TrimSpace(p)) for _, line := range strings.Split(trimmed, "\n") { - l.logFunc(string(line)) + l.logFunc(line) } return len(p), nil } diff --git a/vendor/github.com/mattermost/mattermost-server/mlog/sugar.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/sugar.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/mlog/sugar.go rename to vendor/github.com/mattermost/mattermost-server/v5/mlog/sugar.go index c00a8bb..2368b08 100644 --- a/vendor/github.com/mattermost/mattermost-server/mlog/sugar.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/sugar.go @@ -3,7 +3,9 @@ package mlog -import "go.uber.org/zap" +import ( + "go.uber.org/zap" +) // Made for the plugin interface, use the regular logger for other uses type SugarLogger struct { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go new file mode 100644 index 0000000..8766a96 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go @@ -0,0 +1,142 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + + "github.com/mattermost/logr" + "github.com/wiggin77/merror" + syslog "github.com/wiggin77/srslog" +) + +// Syslog outputs log records to local or remote syslog. +type Syslog struct { + logr.Basic + w *syslog.Writer +} + +// SyslogParams provides parameters for dialing a syslog daemon. +type SyslogParams struct { + IP string `json:"IP"` + Port int `json:"Port"` + Tag string `json:"Tag"` + TLS bool `json:"TLS"` + Cert string `json:"Cert"` + Insecure bool `json:"Insecure"` +} + +// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS. +func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) { + network := "tcp" + var config *tls.Config + + if params.TLS { + network = "tcp+tls" + config = &tls.Config{InsecureSkipVerify: params.Insecure} + if params.Cert != "" { + pool, err := getCertPool(params.Cert) + if err != nil { + return nil, err + } + config.RootCAs = pool + } + } + raddr := fmt.Sprintf("%s:%d", params.IP, params.Port) + + writer, err := syslog.DialWithTLSConfig(network, raddr, syslog.LOG_INFO, params.Tag, config) + if err != nil { + return nil, err + } + + s := &Syslog{w: writer} + s.Basic.Start(s, s, filter, formatter, maxQueue) + + return s, nil +} + +// Shutdown stops processing log records after making best effort to flush queue. +func (s *Syslog) Shutdown(ctx context.Context) error { + errs := merror.New() + + err := s.Basic.Shutdown(ctx) + errs.Append(err) + + err = s.w.Close() + errs.Append(err) + + return errs.ErrorOrNil() +} + +// getCertPool returns a x509.CertPool containing the cert(s) +// from `cert`, which can be a path to a .pem or .crt file, +// or a base64 encoded cert. +func getCertPool(cert string) (*x509.CertPool, error) { + if cert == "" { + return nil, errors.New("no cert provided") + } + + // first treat as a file and try to read. 
+ serverCert, err := ioutil.ReadFile(cert) + if err != nil { + // maybe it's a base64 encoded cert + serverCert, err = base64.StdEncoding.DecodeString(cert) + if err != nil { + return nil, errors.New("cert cannot be read") + } + } + + pool := x509.NewCertPool() + if ok := pool.AppendCertsFromPEM(serverCert); ok { + return pool, nil + } + return nil, errors.New("cannot parse cert") +} + +// Write converts the log record to bytes, via the Formatter, +// and outputs to syslog. +func (s *Syslog) Write(rec *logr.LogRec) error { + _, stacktrace := s.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := s.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + txt := buf.String() + + switch rec.Level() { + case logr.Panic, logr.Fatal: + err = s.w.Crit(txt) + case logr.Error: + err = s.w.Err(txt) + case logr.Warn: + err = s.w.Warning(txt) + case logr.Debug, logr.Trace: + err = s.w.Debug(txt) + default: + // logr.Info plus all custom levels. + err = s.w.Info(txt) + } + + if err != nil { + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("syslog write fail: %w", err)) + // syslog writer will try to reconnect. + } + return err +} + +// String returns a string representation of this target. +func (s *Syslog) String() string { + return "SyslogTarget" +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go new file mode 100644 index 0000000..ffbd14f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go @@ -0,0 +1,273 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package mlog + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + _ "net/http/pprof" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/mattermost/logr" +) + +const ( + DialTimeoutSecs = 30 + WriteTimeoutSecs = 30 + RetryBackoffMillis int64 = 100 + MaxRetryBackoffMillis int64 = 30 * 1000 // 30 seconds +) + +// Tcp outputs log records to raw socket server. +type Tcp struct { + logr.Basic + + params *TcpParams + addy string + + mutex sync.Mutex + conn net.Conn + monitor chan struct{} + shutdown chan struct{} +} + +// TcpParams provides parameters for dialing a socket server. +type TcpParams struct { + IP string `json:"IP"` + Port int `json:"Port"` + TLS bool `json:"TLS"` + Cert string `json:"Cert"` + Insecure bool `json:"Insecure"` +} + +// NewTcpTarget creates a target capable of outputting log records to a raw socket, with or without TLS. +func NewTcpTarget(filter logr.Filter, formatter logr.Formatter, params *TcpParams, maxQueue int) (*Tcp, error) { + tcp := &Tcp{ + params: params, + addy: fmt.Sprintf("%s:%d", params.IP, params.Port), + monitor: make(chan struct{}), + shutdown: make(chan struct{}), + } + tcp.Basic.Start(tcp, tcp, filter, formatter, maxQueue) + + return tcp, nil +} + +// getConn provides a net.Conn. If a connection already exists, it is returned immediately, +// otherwise this method blocks until a new connection is created, timeout or shutdown. 
+func (tcp *Tcp) getConn() (net.Conn, error) { + tcp.mutex.Lock() + defer tcp.mutex.Unlock() + + Log(LvlTcpLogTarget, "getConn enter", String("addy", tcp.addy)) + defer Log(LvlTcpLogTarget, "getConn exit", String("addy", tcp.addy)) + + if tcp.conn != nil { + Log(LvlTcpLogTarget, "reusing existing conn", String("addy", tcp.addy)) // use "With" once Zap is removed + return tcp.conn, nil + } + + type result struct { + conn net.Conn + err error + } + + connChan := make(chan result) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*DialTimeoutSecs) + defer cancel() + + go func(ctx context.Context, ch chan result) { + Log(LvlTcpLogTarget, "dailing", String("addy", tcp.addy)) + conn, err := tcp.dial(ctx) + if err == nil { + tcp.conn = conn + tcp.monitor = make(chan struct{}) + go monitor(tcp.conn, tcp.monitor) + } + connChan <- result{conn: conn, err: err} + }(ctx, connChan) + + select { + case <-tcp.shutdown: + return nil, errors.New("shutdown") + case res := <-connChan: + return res.conn, res.err + } +} + +// dial connects to a TCP socket, and optionally performs a TLS handshake. +// A non-nil context must be provided which can cancel the dial. +func (tcp *Tcp) dial(ctx context.Context) (net.Conn, error) { + var dialer net.Dialer + dialer.Timeout = time.Second * DialTimeoutSecs + conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", tcp.params.IP, tcp.params.Port)) + if err != nil { + return nil, err + } + + if !tcp.params.TLS { + return conn, nil + } + + Log(LvlTcpLogTarget, "TLS handshake", String("addy", tcp.addy)) + + tlsconfig := &tls.Config{ + ServerName: tcp.params.IP, + InsecureSkipVerify: tcp.params.Insecure, + } + if tcp.params.Cert != "" { + pool, err := getCertPool(tcp.params.Cert) + if err != nil { + return nil, err + } + tlsconfig.RootCAs = pool + } + + tlsConn := tls.Client(conn, tlsconfig) + if err := tlsConn.Handshake(); err != nil { + return nil, err + } + return tlsConn, nil +} + +func (tcp *Tcp) close() error { + tcp.mutex.Lock() + defer tcp.mutex.Unlock() + + var err error + if tcp.conn != nil { + Log(LvlTcpLogTarget, "closing connection", String("addy", tcp.addy)) + close(tcp.monitor) + err = tcp.conn.Close() + tcp.conn = nil + } + return err +} + +// Shutdown stops processing log records after making best effort to flush queue. +func (tcp *Tcp) Shutdown(ctx context.Context) error { + errs := &multierror.Error{} + + Log(LvlTcpLogTarget, "shutting down", String("addy", tcp.addy)) + + if err := tcp.Basic.Shutdown(ctx); err != nil { + errs = multierror.Append(errs, err) + } + + if err := tcp.close(); err != nil { + errs = multierror.Append(errs, err) + } + + close(tcp.shutdown) + return errs.ErrorOrNil() +} + +// Write converts the log record to bytes, via the Formatter, and outputs to the socket. +// Called by dedicated target goroutine and will block until success or shutdown. 
+func (tcp *Tcp) Write(rec *logr.LogRec) error { + _, stacktrace := tcp.IsLevelEnabled(rec.Level()) + + buf := rec.Logger().Logr().BorrowBuffer() + defer rec.Logger().Logr().ReleaseBuffer(buf) + + buf, err := tcp.Formatter().Format(rec, stacktrace, buf) + if err != nil { + return err + } + + try := 1 + backoff := RetryBackoffMillis + for { + select { + case <-tcp.shutdown: + return err + default: + } + + conn, err := tcp.getConn() + if err != nil { + Log(LvlTcpLogTarget, "failed getting connection", String("addy", tcp.addy), Err(err)) + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("log target %s connection error: %w", tcp.String(), err)) + backoff = tcp.sleep(backoff) + continue + } + + conn.SetWriteDeadline(time.Now().Add(time.Second * WriteTimeoutSecs)) + _, err = buf.WriteTo(conn) + if err == nil { + return nil + } + + Log(LvlTcpLogTarget, "write error", String("addy", tcp.addy), Err(err)) + reporter := rec.Logger().Logr().ReportError + reporter(fmt.Errorf("log target %s write error: %w", tcp.String(), err)) + + _ = tcp.close() + + backoff = tcp.sleep(backoff) + try++ + Log(LvlTcpLogTarget, "retrying write", String("addy", tcp.addy), Int("try", try)) + } +} + +// monitor continuously tries to read from the connection to detect socket close. +// This is needed because TCP target uses a write only socket and Linux systems +// take a long time to detect a loss of connectivity on a socket when only writing; +// the writes simply fail without an error returned. +func monitor(conn net.Conn, done <-chan struct{}) { + addy := conn.RemoteAddr().String() + defer Log(LvlTcpLogTarget, "monitor exiting", String("addy", addy)) + + buf := make([]byte, 1) + for { + Log(LvlTcpLogTarget, "monitor loop", String("addy", addy)) + + select { + case <-done: + return + case <-time.After(1 * time.Second): + } + + err := conn.SetReadDeadline(time.Now().Add(time.Second * 30)) + if err != nil { + continue + } + + _, err = conn.Read(buf) + + if errt, ok := err.(net.Error); ok && errt.Timeout() { + // read timeout is expected, keep looping. + continue + } + + // Any other error closes the connection, forcing a reconnect. + Log(LvlTcpLogTarget, "monitor closing connection", Err(err)) + conn.Close() + return + } +} + +// String returns a string representation of this target. 
+func (tcp *Tcp) String() string { + return fmt.Sprintf("TcpTarget[%s:%d]", tcp.params.IP, tcp.params.Port) +} + +func (tcp *Tcp) sleep(backoff int64) int64 { + select { + case <-tcp.shutdown: + case <-time.After(time.Millisecond * time.Duration(backoff)): + } + + nextBackoff := backoff + (backoff >> 1) + if nextBackoff > MaxRetryBackoffMillis { + nextBackoff = MaxRetryBackoffMillis + } + return nextBackoff +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem b/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem new file mode 100644 index 0000000..6ce8d04 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem @@ -0,0 +1,43 @@ +-----BEGIN CERTIFICATE----- +MIIDjzCCAnegAwIBAgIRAPYfRSwdzKopBKxYxKqslJUwDQYJKoZIhvcNAQELBQAw +JzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0xOTAz +MjIwMDE0MTVaFw0yMjAzMDYwMDE0MTVaMDsxOTA3BgNVBAMTME1hdHRlcm1vc3Qs +IEluYy4gSW50ZXJuYWwgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMjliRdmvnNL4u/Jr/M2dPwQmTJXEBY/Vq9Q +vAU52X3tRMCPxcaFz+x6ftuvdO2NdohXGAmtx9QU5LZcvFeTDpoVEBo9A+4jtLvD +DZYaTNLpJmoSoJHaDbdWX+OAOqyDiWS741LuiMKWHhew9QOisat2ZINPxjmAd9wE +xthTMgzsv7MUqnMer8U5OGQ0Qy7wAmNRc+2K3qPwkxe2RUvcte50DUFNgxEginsh +vrkOXR383vUCZfu72qu8oggjiQpyTllu5je2Ap6JLjYLkEMiMqrYADuWor/ZHwa6 +WrFqVETxWfAV5u9Eh0wZM/KKYwRQuw9y+Nans77FmUl1tVWWNN8CAwEAAaOBoTCB +njAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBQY4Uqswyr2hO/HetZt2RDxJdTIPjBi +BgNVHSMEWzBZgBRFZXVg2Z5tNIsWeWjBLEy2yzKbMKErpCkwJzElMCMGA1UEAwwc +TWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQYIUEifGUOM+bIFZo1tkjZB5YGBr +0xEwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQAEdexL30Q0zBHmPAH8 +LhdK7dbzW1CmILbxRZlKAwRN+hKRXiMW3MHIkhNuoV9Aev602Q+ja4lWsRi/ktOL +ni1FWx5gSScgdG8JGj47dOmoT3vXKX7+umiv4rQLPDl9/DKMuv204OYJq6VT+uNU +6C6kL157jGJEO76H4fMZ8oYsD7Sq0zjiNKtuCYii0ngH3j3gB1jACLqRgveU7MdT +pqOV2KfY31+h8VBtkUvljNztQ9xNY8Fjmt0SMf7E3FaUcaar3ZCr70G5aU3dKbe7 +47vGOBa5tCqw4YK0jgDKid3IJQul9a3J1mSsH8Wy3to9cAV4KGZBQLnzCX15a/+v +3yVh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfjCCAmagAwIBAgIUEifGUOM+bIFZo1tkjZB5YGBr0xEwDQYJKoZIhvcNAQEL +BQAwJzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0x +OTAzMjEyMTI4NDNaFw0yOTAzMTgyMTI4NDNaMCcxJTAjBgNVBAMMHE1hdHRlcm1v +c3QsIEluYy4gSW50ZXJuYWwgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDH0Xq5rMBGpKOVWTpb5MnaJIWFP/vOtvEk+7hVrfOfe1/5x0Kk3UgAHj85 +otaEZD1Lhn/JLkEqCiE/UXMJFwJDlNcO4CkdKBSpYX4bKAqy5q/X3QwioMSNpJG1 ++YYrNGBH0sgKcKjyCaLhmqYLD0xZDVOmWIYBU9jUPyXw5U0tnsVrTqGMxVkm1xCY +krCWN1ZoUrLvL0MCZc5qpxoPTopr9UO9cqSBSuy6BVWVuEWBZhpqHt+ul8VxhzzY +q1k4l7r2qw+/wm1iJBedTeBVeWNag8JaVfLgu+/W7oJVlPO32Po7pnvHp8iJ3b4K +zXyVHaTX4S6Em+6LV8855TYrShzlAgMBAAGjgaEwgZ4wHQYDVR0OBBYEFEVldWDZ +nm00ixZ5aMEsTLbLMpswMGIGA1UdIwRbMFmAFEVldWDZnm00ixZ5aMEsTLbLMpsw +oSukKTAnMSUwIwYDVQQDDBxNYXR0ZXJtb3N0LCBJbmMuIEludGVybmFsIENBghQS +J8ZQ4z5sgVmjW2SNkHlgYGvTETAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjAN +BgkqhkiG9w0BAQsFAAOCAQEAPiCWFmopyAkY2T3Zyo4yaRPhX1+VOTMKJtY6EUhq +/GHz6kzEyvCUBf0N892cibGxekrEoItY9NqO6RQRfowg+Gn5kc13z4NyL2W8/eoT +Xy0ZvfaQbU++fQ6pVtWtMblDMU9xiYd7/MDvJpO328l1Vhcdp8kEi+lCvpy0sCRc +PxzPhbgCMAbZEGx+4TMQd4SZKzlRxW/2fflpReh6v1Dv0VDUSYQWwsUnaLpdKHfh +a5k0vuySYcszE4YKlY0zakeFlJfp7fBp1xTwcdW8aTfw15EicPMwTc6xxA4JJUJx +cddu817n1nayK5u6r9Qh1oIVkr0nC9YELMMy4dpPgJ88SA== +-----END CERTIFICATE----- diff --git a/vendor/github.com/mattermost/mattermost-server/mlog/testing.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go similarity index 68% rename from 
vendor/github.com/mattermost/mattermost-server/mlog/testing.go rename to vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go index 1b97c2f..c667283 100644 --- a/vendor/github.com/mattermost/mattermost-server/mlog/testing.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go @@ -1,10 +1,12 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package mlog import ( + "io" "strings" + "sync" "testing" "go.uber.org/zap" @@ -23,16 +25,19 @@ func (tw *testingWriter) Write(b []byte) (int, error) { // NewTestingLogger creates a Logger that proxies logs through a testing interface. // This allows tests that spin up App instances to avoid spewing logs unless the test fails or -verbose is specified. -func NewTestingLogger(tb testing.TB) *Logger { +func NewTestingLogger(tb testing.TB, writer io.Writer) *Logger { logWriter := &testingWriter{tb} - logWriterSync := zapcore.AddSync(logWriter) + multiWriter := io.MultiWriter(logWriter, writer) + logWriterSync := zapcore.AddSync(multiWriter) testingLogger := &Logger{ consoleLevel: zap.NewAtomicLevelAt(getZapLevel("debug")), fileLevel: zap.NewAtomicLevelAt(getZapLevel("info")), + logrLogger: newLogr(), + mutex: &sync.RWMutex{}, } - logWriterCore := zapcore.NewCore(makeEncoder(true), logWriterSync, testingLogger.consoleLevel) + logWriterCore := zapcore.NewCore(makeEncoder(true), zapcore.Lock(logWriterSync), testingLogger.consoleLevel) testingLogger.zap = zap.New(logWriterCore, zap.AddCaller(), diff --git a/vendor/github.com/mattermost/mattermost-server/model/access.go b/vendor/github.com/mattermost/mattermost-server/v5/model/access.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/model/access.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/access.go index e9603c7..d6b06f4 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/access.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/access.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -31,6 +31,7 @@ type AccessResponse struct { ExpiresIn int32 `json:"expires_in"` Scope string `json:"scope"` RefreshToken string `json:"refresh_token"` + IdToken string `json:"id_token"` } // IsValid validates the AccessData and returns an error if it isn't configured @@ -60,13 +61,13 @@ func (ad *AccessData) IsValid() *AppError { return nil } -func (me *AccessData) IsExpired() bool { +func (ad *AccessData) IsExpired() bool { - if me.ExpiresAt <= 0 { + if ad.ExpiresAt <= 0 { return false } - if GetMillis() > me.ExpiresAt { + if GetMillis() > ad.ExpiresAt { return true } diff --git a/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go b/vendor/github.com/mattermost/mattermost-server/v5/model/analytics_row.go similarity index 52% rename from vendor/github.com/mattermost/mattermost-server/model/analytics_row.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/analytics_row.go index 4615bb7..d121666 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/analytics_row.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -15,27 +15,27 @@ type AnalyticsRow struct { type AnalyticsRows []*AnalyticsRow -func (me *AnalyticsRow) ToJson() string { - b, _ := json.Marshal(me) +func (ar *AnalyticsRow) ToJson() string { + b, _ := json.Marshal(ar) return string(b) } func AnalyticsRowFromJson(data io.Reader) *AnalyticsRow { - var me *AnalyticsRow - json.NewDecoder(data).Decode(&me) - return me + var ar *AnalyticsRow + json.NewDecoder(data).Decode(&ar) + return ar } -func (me AnalyticsRows) ToJson() string { - if b, err := json.Marshal(me); err != nil { +func (ar AnalyticsRows) ToJson() string { + b, err := json.Marshal(ar) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func AnalyticsRowsFromJson(data io.Reader) AnalyticsRows { - var me AnalyticsRows - json.NewDecoder(data).Decode(&me) - return me + var ar AnalyticsRows + json.NewDecoder(data).Decode(&ar) + return ar } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go b/vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go new file mode 100644 index 0000000..f41d182 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/at_mentions.go @@ -0,0 +1,47 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "regexp" + "strings" +) + +var atMentionRegexp = regexp.MustCompile(`\B@[[:alnum:]][[:alnum:]\.\-_]*`) + +const usernameSpecialChars = ".-_" + +// PossibleAtMentions returns all substrings in message that look like valid @ +// mentions. +func PossibleAtMentions(message string) []string { + var names []string + + if !strings.Contains(message, "@") { + return names + } + + alreadyMentioned := make(map[string]bool) + for _, match := range atMentionRegexp.FindAllString(message, -1) { + name := NormalizeUsername(match[1:]) + if !alreadyMentioned[name] && IsValidUsername(name) { + names = append(names, name) + alreadyMentioned[name] = true + } + } + + return names +} + +// TrimUsernameSpecialChar tries to remove the last character from word if it +// is a special character for usernames (dot, dash or underscore). If not, it +// returns the same string. +func TrimUsernameSpecialChar(word string) (string, bool) { + len := len(word) + + if len > 0 && strings.LastIndexAny(word, usernameSpecialChars) == (len-1) { + return word[:len-1], true + } + + return word, false +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/audit.go b/vendor/github.com/mattermost/mattermost-server/v5/model/audit.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/model/audit.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/audit.go index e3d1bdf..dd1e060 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/audit.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/audit.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go b/vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go new file mode 100644 index 0000000..24a8d94 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/auditconv.go @@ -0,0 +1,669 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "github.com/francoispqt/gojay" +) + +// AuditModelTypeConv converts key model types to something better suited for audit output. +func AuditModelTypeConv(val interface{}) (newVal interface{}, converted bool) { + if val == nil { + return nil, false + } + switch v := val.(type) { + case *Channel: + return newAuditChannel(v), true + case *Team: + return newAuditTeam(v), true + case *User: + return newAuditUser(v), true + case *Command: + return newAuditCommand(v), true + case *CommandArgs: + return newAuditCommandArgs(v), true + case *Bot: + return newAuditBot(v), true + case *ChannelModerationPatch: + return newAuditChannelModerationPatch(v), true + case *Emoji: + return newAuditEmoji(v), true + case *FileInfo: + return newAuditFileInfo(v), true + case *Group: + return newAuditGroup(v), true + case *Job: + return newAuditJob(v), true + case *OAuthApp: + return newAuditOAuthApp(v), true + case *Post: + return newAuditPost(v), true + case *Role: + return newAuditRole(v), true + case *Scheme: + return newAuditScheme(v), true + case *SchemeRoles: + return newAuditSchemeRoles(v), true + case *Session: + return newAuditSession(v), true + case *IncomingWebhook: + return newAuditIncomingWebhook(v), true + case *OutgoingWebhook: + return newAuditOutgoingWebhook(v), true + } + return val, false +} + +type auditChannel struct { + ID string + Name string + Type string +} + +// newAuditChannel creates a simplified representation of Channel for output to audit log. +func newAuditChannel(c *Channel) auditChannel { + var channel auditChannel + if c != nil { + channel.ID = c.Id + channel.Name = c.Name + channel.Type = c.Type + } + return channel +} + +func (c auditChannel) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", c.ID) + enc.StringKey("name", c.Name) + enc.StringKey("type", c.Type) +} + +func (c auditChannel) IsNil() bool { + return false +} + +type auditTeam struct { + ID string + Name string + Type string +} + +// newAuditTeam creates a simplified representation of Team for output to audit log. +func newAuditTeam(t *Team) auditTeam { + var team auditTeam + if t != nil { + team.ID = t.Id + team.Name = t.Name + team.Type = t.Type + } + return team +} + +func (t auditTeam) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", t.ID) + enc.StringKey("name", t.Name) + enc.StringKey("type", t.Type) +} + +func (t auditTeam) IsNil() bool { + return false +} + +type auditUser struct { + ID string + Name string + Roles string +} + +// newAuditUser creates a simplified representation of User for output to audit log. +func newAuditUser(u *User) auditUser { + var user auditUser + if u != nil { + user.ID = u.Id + user.Name = u.Username + user.Roles = u.Roles + } + return user +} + +func (u auditUser) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", u.ID) + enc.StringKey("name", u.Name) + enc.StringKey("roles", u.Roles) +} + +func (u auditUser) IsNil() bool { + return false +} + +type auditCommand struct { + ID string + CreatorID string + TeamID string + Trigger string + Method string + Username string + IconURL string + AutoComplete bool + AutoCompleteDesc string + AutoCompleteHint string + DisplayName string + Description string + URL string +} + +// newAuditCommand creates a simplified representation of Command for output to audit log. 
+func newAuditCommand(c *Command) auditCommand { + var cmd auditCommand + if c != nil { + cmd.ID = c.Id + cmd.CreatorID = c.CreatorId + cmd.TeamID = c.TeamId + cmd.Trigger = c.Trigger + cmd.Method = c.Method + cmd.Username = c.Username + cmd.IconURL = c.IconURL + cmd.AutoComplete = c.AutoComplete + cmd.AutoCompleteDesc = c.AutoCompleteDesc + cmd.AutoCompleteHint = c.AutoCompleteHint + cmd.DisplayName = c.DisplayName + cmd.Description = c.Description + cmd.URL = c.URL + } + return cmd +} + +func (cmd auditCommand) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", cmd.ID) + enc.StringKey("creator_id", cmd.CreatorID) + enc.StringKey("team_id", cmd.TeamID) + enc.StringKey("trigger", cmd.Trigger) + enc.StringKey("method", cmd.Method) + enc.StringKey("username", cmd.Username) + enc.StringKey("icon_url", cmd.IconURL) + enc.BoolKey("auto_complete", cmd.AutoComplete) + enc.StringKey("auto_complete_desc", cmd.AutoCompleteDesc) + enc.StringKey("auto_complete_hint", cmd.AutoCompleteHint) + enc.StringKey("display", cmd.DisplayName) + enc.StringKey("desc", cmd.Description) + enc.StringKey("url", cmd.URL) +} + +func (cmd auditCommand) IsNil() bool { + return false +} + +type auditCommandArgs struct { + ChannelID string + TeamID string + TriggerID string + Command string +} + +// newAuditCommandArgs creates a simplified representation of CommandArgs for output to audit log. +func newAuditCommandArgs(ca *CommandArgs) auditCommandArgs { + var cmdargs auditCommandArgs + if ca != nil { + cmdargs.ChannelID = ca.ChannelId + cmdargs.TeamID = ca.TeamId + cmdargs.TriggerID = ca.TriggerId + cmdargs.Command = ca.Command + } + return cmdargs +} + +func (ca auditCommandArgs) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("channel_id", ca.ChannelID) + enc.StringKey("team_id", ca.TriggerID) + enc.StringKey("trigger_id", ca.TeamID) + enc.StringKey("command", ca.Command) +} + +func (ca auditCommandArgs) IsNil() bool { + return false +} + +type auditBot struct { + UserID string + Username string + Displayname string +} + +// newAuditBot creates a simplified representation of Bot for output to audit log. +func newAuditBot(b *Bot) auditBot { + var bot auditBot + if b != nil { + bot.UserID = b.UserId + bot.Username = b.Username + bot.Displayname = b.DisplayName + } + return bot +} + +func (b auditBot) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("user_id", b.UserID) + enc.StringKey("username", b.Username) + enc.StringKey("display", b.Displayname) +} + +func (b auditBot) IsNil() bool { + return false +} + +type auditChannelModerationPatch struct { + Name string + RoleGuests bool + RoleMembers bool +} + +// newAuditChannelModerationPatch creates a simplified representation of ChannelModerationPatch for output to audit log. +func newAuditChannelModerationPatch(p *ChannelModerationPatch) auditChannelModerationPatch { + var patch auditChannelModerationPatch + if p != nil { + if p.Name != nil { + patch.Name = *p.Name + } + if p.Roles.Guests != nil { + patch.RoleGuests = *p.Roles.Guests + } + if p.Roles.Members != nil { + patch.RoleMembers = *p.Roles.Members + } + } + return patch +} + +func (p auditChannelModerationPatch) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("name", p.Name) + enc.BoolKey("role_guests", p.RoleGuests) + enc.BoolKey("role_members", p.RoleMembers) +} + +func (p auditChannelModerationPatch) IsNil() bool { + return false +} + +type auditEmoji struct { + ID string + Name string +} + +// newAuditEmoji creates a simplified representation of Emoji for output to audit log. 
+func newAuditEmoji(e *Emoji) auditEmoji { + var emoji auditEmoji + if e != nil { + emoji.ID = e.Id + emoji.Name = e.Name + } + return emoji +} + +func (e auditEmoji) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", e.ID) + enc.StringKey("name", e.Name) +} + +func (e auditEmoji) IsNil() bool { + return false +} + +type auditFileInfo struct { + ID string + PostID string + Path string + Name string + Extension string + Size int64 +} + +// newAuditFileInfo creates a simplified representation of FileInfo for output to audit log. +func newAuditFileInfo(f *FileInfo) auditFileInfo { + var fi auditFileInfo + if f != nil { + fi.ID = f.Id + fi.PostID = f.PostId + fi.Path = f.Path + fi.Name = f.Name + fi.Extension = f.Extension + fi.Size = f.Size + } + return fi +} + +func (fi auditFileInfo) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", fi.ID) + enc.StringKey("post_id", fi.PostID) + enc.StringKey("path", fi.Path) + enc.StringKey("name", fi.Name) + enc.StringKey("ext", fi.Extension) + enc.Int64Key("size", fi.Size) +} + +func (fi auditFileInfo) IsNil() bool { + return false +} + +type auditGroup struct { + ID string + Name string + DisplayName string + Description string +} + +// newAuditGroup creates a simplified representation of Group for output to audit log. +func newAuditGroup(g *Group) auditGroup { + var group auditGroup + if g != nil { + group.ID = g.Id + if g.Name == nil { + group.Name = "" + } else { + group.Name = *g.Name + } + group.DisplayName = g.DisplayName + group.Description = g.Description + } + return group +} + +func (g auditGroup) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", g.ID) + enc.StringKey("name", g.Name) + enc.StringKey("display", g.DisplayName) + enc.StringKey("desc", g.Description) +} + +func (g auditGroup) IsNil() bool { + return false +} + +type auditJob struct { + ID string + Type string + Priority int64 + StartAt int64 +} + +// newAuditJob creates a simplified representation of Job for output to audit log. +func newAuditJob(j *Job) auditJob { + var job auditJob + if j != nil { + job.ID = j.Id + job.Type = j.Type + job.Priority = j.Priority + job.StartAt = j.StartAt + } + return job +} + +func (j auditJob) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", j.ID) + enc.StringKey("type", j.Type) + enc.Int64Key("priority", j.Priority) + enc.Int64Key("start_at", j.StartAt) +} + +func (j auditJob) IsNil() bool { + return false +} + +type auditOAuthApp struct { + ID string + CreatorID string + Name string + Description string + IsTrusted bool +} + +// newAuditOAuthApp creates a simplified representation of OAuthApp for output to audit log. +func newAuditOAuthApp(o *OAuthApp) auditOAuthApp { + var oauth auditOAuthApp + if o != nil { + oauth.ID = o.Id + oauth.CreatorID = o.CreatorId + oauth.Name = o.Name + oauth.Description = o.Description + oauth.IsTrusted = o.IsTrusted + } + return oauth +} + +func (o auditOAuthApp) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", o.ID) + enc.StringKey("creator_id", o.CreatorID) + enc.StringKey("name", o.Name) + enc.StringKey("desc", o.Description) + enc.BoolKey("trusted", o.IsTrusted) +} + +func (o auditOAuthApp) IsNil() bool { + return false +} + +type auditPost struct { + ID string + ChannelID string + Type string + IsPinned bool +} + +// newAuditPost creates a simplified representation of Post for output to audit log. 
+func newAuditPost(p *Post) auditPost { + var post auditPost + if p != nil { + post.ID = p.Id + post.ChannelID = p.ChannelId + post.Type = p.Type + post.IsPinned = p.IsPinned + } + return post +} + +func (p auditPost) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", p.ID) + enc.StringKey("channel_id", p.ChannelID) + enc.StringKey("type", p.Type) + enc.BoolKey("pinned", p.IsPinned) +} + +func (p auditPost) IsNil() bool { + return false +} + +type auditRole struct { + ID string + Name string + DisplayName string + Permissions []string + SchemeManaged bool + BuiltIn bool +} + +// newAuditRole creates a simplified representation of Role for output to audit log. +func newAuditRole(r *Role) auditRole { + var role auditRole + if r != nil { + role.ID = r.Id + role.Name = r.Name + role.DisplayName = r.DisplayName + role.Permissions = append(role.Permissions, r.Permissions...) + role.SchemeManaged = r.SchemeManaged + role.BuiltIn = r.BuiltIn + } + return role +} + +func (r auditRole) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", r.ID) + enc.StringKey("name", r.Name) + enc.StringKey("display", r.DisplayName) + enc.SliceStringKey("perms", r.Permissions) + enc.BoolKey("schemeManaged", r.SchemeManaged) + enc.BoolKey("builtin", r.BuiltIn) +} + +func (r auditRole) IsNil() bool { + return false +} + +type auditScheme struct { + ID string + Name string + DisplayName string + Scope string +} + +// newAuditScheme creates a simplified representation of Scheme for output to audit log. +func newAuditScheme(s *Scheme) auditScheme { + var scheme auditScheme + if s != nil { + scheme.ID = s.Id + scheme.Name = s.Name + scheme.DisplayName = s.DisplayName + scheme.Scope = s.Scope + } + return scheme +} + +func (s auditScheme) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", s.ID) + enc.StringKey("name", s.Name) + enc.StringKey("display", s.DisplayName) + enc.StringKey("scope", s.Scope) +} + +func (s auditScheme) IsNil() bool { + return false +} + +type auditSchemeRoles struct { + SchemeAdmin bool + SchemeUser bool + SchemeGuest bool +} + +// newAuditSchemeRoles creates a simplified representation of SchemeRoles for output to audit log. +func newAuditSchemeRoles(s *SchemeRoles) auditSchemeRoles { + var roles auditSchemeRoles + if s != nil { + roles.SchemeAdmin = s.SchemeAdmin + roles.SchemeUser = s.SchemeUser + roles.SchemeGuest = s.SchemeGuest + } + return roles +} + +func (s auditSchemeRoles) MarshalJSONObject(enc *gojay.Encoder) { + enc.BoolKey("admin", s.SchemeAdmin) + enc.BoolKey("user", s.SchemeUser) + enc.BoolKey("guest", s.SchemeGuest) +} + +func (s auditSchemeRoles) IsNil() bool { + return false +} + +type auditSession struct { + ID string + UserId string + DeviceId string +} + +// newAuditSession creates a simplified representation of Session for output to audit log. +func newAuditSession(s *Session) auditSession { + var session auditSession + if s != nil { + session.ID = s.Id + session.UserId = s.UserId + session.DeviceId = s.DeviceId + } + return session +} + +func (s auditSession) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", s.ID) + enc.StringKey("user_id", s.UserId) + enc.StringKey("device_id", s.DeviceId) +} + +func (s auditSession) IsNil() bool { + return false +} + +type auditIncomingWebhook struct { + ID string + ChannelID string + TeamId string + DisplayName string + Description string +} + +// newAuditIncomingWebhook creates a simplified representation of IncomingWebhook for output to audit log. 
+func newAuditIncomingWebhook(h *IncomingWebhook) auditIncomingWebhook { + var hook auditIncomingWebhook + if h != nil { + hook.ID = h.Id + hook.ChannelID = h.ChannelId + hook.TeamId = h.TeamId + hook.DisplayName = h.DisplayName + hook.Description = h.Description + } + return hook +} + +func (h auditIncomingWebhook) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", h.ID) + enc.StringKey("channel_id", h.ChannelID) + enc.StringKey("team_id", h.TeamId) + enc.StringKey("display", h.DisplayName) + enc.StringKey("desc", h.Description) +} + +func (h auditIncomingWebhook) IsNil() bool { + return false +} + +type auditOutgoingWebhook struct { + ID string + ChannelID string + TeamID string + TriggerWords StringArray + TriggerWhen int + DisplayName string + Description string + ContentType string + Username string +} + +// newAuditOutgoingWebhook creates a simplified representation of OutgoingWebhook for output to audit log. +func newAuditOutgoingWebhook(h *OutgoingWebhook) auditOutgoingWebhook { + var hook auditOutgoingWebhook + if h != nil { + hook.ID = h.Id + hook.ChannelID = h.ChannelId + hook.TeamID = h.TeamId + hook.TriggerWords = h.TriggerWords + hook.TriggerWhen = h.TriggerWhen + hook.DisplayName = h.DisplayName + hook.Description = h.Description + hook.ContentType = h.ContentType + hook.Username = h.Username + } + return hook +} + +func (h auditOutgoingWebhook) MarshalJSONObject(enc *gojay.Encoder) { + enc.StringKey("id", h.ID) + enc.StringKey("channel_id", h.ChannelID) + enc.StringKey("team_id", h.TeamID) + enc.SliceStringKey("trigger_words", h.TriggerWords) + enc.IntKey("trigger_when", h.TriggerWhen) + enc.StringKey("display", h.DisplayName) + enc.StringKey("desc", h.Description) + enc.StringKey("content_type", h.ContentType) + enc.StringKey("username", h.Username) +} + +func (h auditOutgoingWebhook) IsNil() bool { + return false +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/audits.go b/vendor/github.com/mattermost/mattermost-server/v5/model/audits.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/model/audits.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/audits.go index 3673eb6..54e361f 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/audits.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/audits.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -14,17 +14,16 @@ func (o Audits) Etag() string { if len(o) > 0 { // the first in the list is always the most current return Etag(o[0].CreateAt) - } else { - return "" } + return "" } func (o Audits) ToJson() string { - if b, err := json.Marshal(o); err != nil { + b, err := json.Marshal(o) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func AuditsFromJson(data io.Reader) Audits { diff --git a/vendor/github.com/mattermost/mattermost-server/model/authorize.go b/vendor/github.com/mattermost/mattermost-server/v5/model/authorize.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/model/authorize.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/authorize.go index 922adc0..0191a67 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/authorize.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/authorize.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. 
All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -39,11 +39,11 @@ type AuthorizeRequest struct { // correctly. func (ad *AuthData) IsValid() *AppError { - if len(ad.ClientId) != 26 { + if !IsValidId(ad.ClientId) { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) } - if len(ad.UserId) != 26 { + if !IsValidId(ad.UserId) { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } @@ -78,7 +78,7 @@ func (ad *AuthData) IsValid() *AppError { // correctly. func (ar *AuthorizeRequest) IsValid() *AppError { - if len(ar.ClientId) != 26 { + if !IsValidId(ar.ClientId) { return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/bot.go b/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go similarity index 82% rename from vendor/github.com/mattermost/mattermost-server/model/bot.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/bot.go index 079e1b1..fb46be4 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/bot.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -13,23 +13,25 @@ import ( ) const ( - BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES - BOT_DESCRIPTION_MAX_RUNES = 1024 - BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId + BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES + BOT_DESCRIPTION_MAX_RUNES = 1024 + BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId + BOT_WARN_METRIC_BOT_USERNAME = "mattermost-advisor" ) // Bot is a special type of User meant for programmatic interactions. // Note that the primary key of a bot is the UserId, and matches the primary key of the // corresponding user. type Bot struct { - UserId string `json:"user_id"` - Username string `json:"username"` - DisplayName string `json:"display_name,omitempty"` - Description string `json:"description,omitempty"` - OwnerId string `json:"owner_id"` - CreateAt int64 `json:"create_at"` - UpdateAt int64 `json:"update_at"` - DeleteAt int64 `json:"delete_at"` + UserId string `json:"user_id"` + Username string `json:"username"` + DisplayName string `json:"display_name,omitempty"` + Description string `json:"description,omitempty"` + OwnerId string `json:"owner_id"` + LastIconUpdate int64 `json:"last_icon_update,omitempty"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` } // BotPatch is a description of what fields to update on an existing bot. 
@@ -167,7 +169,7 @@ func UserFromBot(b *Bot) *User { return &User{ Id: b.UserId, Username: b.Username, - Email: fmt.Sprintf("%s@localhost", strings.ToLower(b.Username)), + Email: NormalizeEmail(fmt.Sprintf("%s@localhost", b.Username)), FirstName: b.DisplayName, Roles: SYSTEM_USER_ROLE_ID, } @@ -218,3 +220,15 @@ func (l *BotList) Etag() string { func MakeBotNotFoundError(userId string) *AppError { return NewAppError("SqlBotStore.Get", "store.sql_bot.get.missing.app_error", map[string]interface{}{"user_id": userId}, "", http.StatusNotFound) } + +func IsBotDMChannel(channel *Channel, botUserID string) bool { + if channel.Type != CHANNEL_DIRECT { + return false + } + + if !strings.HasPrefix(channel.Name, botUserID+"__") && !strings.HasSuffix(channel.Name, "__"+botUserID) { + return false + } + + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/bot_default_image.go b/vendor/github.com/mattermost/mattermost-server/v5/model/bot_default_image.go similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/model/bot_default_image.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/bot_default_image.go index 96dd77e..d9cdd2e 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/bot_default_image.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/bot_default_image.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/builtin.go b/vendor/github.com/mattermost/mattermost-server/v5/model/builtin.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/model/builtin.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/builtin.go index 5dd00a9..38e01f8 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/builtin.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/builtin.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go b/vendor/github.com/mattermost/mattermost-server/v5/model/bundle_info.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/model/bundle_info.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/bundle_info.go index 7cb0670..5954432 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/bundle_info.go @@ -1,9 +1,11 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
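The IsBotDMChannel helper added above relies on the direct-message naming convention: the two member IDs joined by "__", smaller ID first. A minimal sketch, assuming the v5 module path introduced by this migration (GetDMNameFromIds appears further below in channel.go):

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	// A bot DM channel is named from the two member IDs joined by "__",
	// ordered lexicographically by GetDMNameFromIds.
	botID := model.NewId()
	userID := model.NewId()

	ch := &model.Channel{
		Type: model.CHANNEL_DIRECT,
		Name: model.GetDMNameFromIds(botID, userID),
	}

	fmt.Println(model.IsBotDMChannel(ch, botID))          // true
	fmt.Println(model.IsBotDMChannel(ch, model.NewId()))  // false: unrelated user ID
}
```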
package model -import "github.com/mattermost/mattermost-server/mlog" +import ( + "github.com/mattermost/mattermost-server/v5/mlog" +) type BundleInfo struct { Path string diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go similarity index 71% rename from vendor/github.com/mattermost/mattermost-server/model/channel.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel.go index d0d1b8f..4a62ea4 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -51,6 +51,7 @@ type Channel struct { SchemeId *string `json:"scheme_id"` Props map[string]interface{} `json:"props" db:"-"` GroupConstrained *bool `json:"group_constrained"` + Shared *bool `json:"shared"` } type ChannelWithTeamData struct { @@ -84,18 +85,60 @@ type DirectChannelForExport struct { Members *[]string } +type ChannelModeration struct { + Name string `json:"name"` + Roles *ChannelModeratedRoles `json:"roles"` +} + +type ChannelModeratedRoles struct { + Guests *ChannelModeratedRole `json:"guests"` + Members *ChannelModeratedRole `json:"members"` +} + +type ChannelModeratedRole struct { + Value bool `json:"value"` + Enabled bool `json:"enabled"` +} + +type ChannelModerationPatch struct { + Name *string `json:"name"` + Roles *ChannelModeratedRolesPatch `json:"roles"` +} + +type ChannelModeratedRolesPatch struct { + Guests *bool `json:"guests"` + Members *bool `json:"members"` +} + // ChannelSearchOpts contains options for searching channels. // // NotAssociatedToGroup will exclude channels that have associated, active GroupChannels records. // ExcludeDefaultChannels will exclude the configured default channels (ex 'town-square' and 'off-topic'). // IncludeDeleted will include channel records where DeleteAt != 0. // ExcludeChannelNames will exclude channels from the results by name. +// Paginate whether to paginate the results. +// Page page requested, if results are paginated. +// PerPage number of results per page, if paginated. 
// type ChannelSearchOpts struct { - NotAssociatedToGroup string - ExcludeDefaultChannels bool - IncludeDeleted bool - ExcludeChannelNames []string + NotAssociatedToGroup string + ExcludeDefaultChannels bool + IncludeDeleted bool + Deleted bool + ExcludeChannelNames []string + TeamIds []string + GroupConstrained bool + ExcludeGroupConstrained bool + Public bool + Private bool + Page *int + PerPage *int +} + +type ChannelMemberCountByGroup struct { + GroupId string `db:"-" json:"group_id"` + ChannelMemberCount int64 `db:"-" json:"channel_member_count"` + ChannelMemberTimezonesCount int64 `db:"-" json:"channel_member_timezones_count"` } func (o *Channel) DeepCopy() *Channel { @@ -139,12 +182,30 @@ func ChannelPatchFromJson(data io.Reader) *ChannelPatch { return o } +func ChannelModerationsFromJson(data io.Reader) []*ChannelModeration { + var o []*ChannelModeration + json.NewDecoder(data).Decode(&o) + return o +} + +func ChannelModerationsPatchFromJson(data io.Reader) []*ChannelModerationPatch { + var o []*ChannelModerationPatch + json.NewDecoder(data).Decode(&o) + return o +} + +func ChannelMemberCountsByGroupFromJson(data io.Reader) []*ChannelMemberCountByGroup { + var o []*ChannelMemberCountByGroup + json.NewDecoder(data).Decode(&o) + return o +} + func (o *Channel) Etag() string { return Etag(o.Id, o.UpdateAt) } func (o *Channel) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -180,6 +241,11 @@ func (o *Channel) IsValid() *AppError { return NewAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "", http.StatusBadRequest) } + userIds := strings.Split(o.Name, "__") + if o.Type != CHANNEL_DIRECT && len(userIds) == 2 && IsValidId(userIds[0]) && IsValidId(userIds[1]) { + return NewAppError("Channel.IsValid", "model.channel.is_valid.name.app_error", nil, "", http.StatusBadRequest) + } + return nil } @@ -188,6 +254,9 @@ func (o *Channel) PreSave() { o.Id = NewId() } + o.Name = SanitizeUnicode(o.Name) + o.DisplayName = SanitizeUnicode(o.DisplayName) + o.CreateAt = GetMillis() o.UpdateAt = o.CreateAt o.ExtraUpdateAt = 0 @@ -195,12 +264,18 @@ func (o *Channel) PreSave() { func (o *Channel) PreUpdate() { o.UpdateAt = GetMillis() + o.Name = SanitizeUnicode(o.Name) + o.DisplayName = SanitizeUnicode(o.DisplayName) } func (o *Channel) IsGroupOrDirect() bool { return o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP } +func (o *Channel) IsOpen() bool { + return o.Type == CHANNEL_OPEN +} + func (o *Channel) Patch(patch *ChannelPatch) { if patch.DisplayName != nil { o.DisplayName = *patch.DisplayName @@ -239,12 +314,35 @@ func (o *Channel) IsGroupConstrained() bool { return o.GroupConstrained != nil && *o.GroupConstrained } +func (o *Channel) IsShared() bool { + return o.Shared != nil && *o.Shared +} + +func (o *Channel) GetOtherUserIdForDM(userId string) string { + if o.Type != CHANNEL_DIRECT { + return "" + } + + userIds := strings.Split(o.Name, "__") + + var otherUserId string + + if userIds[0] != userIds[1] { + if userIds[0] == userId { + otherUserId = userIds[1] + } else { + otherUserId = userIds[0] + } + } + + return otherUserId +} + func GetDMNameFromIds(userId1, userId2 string) string { if userId1 > userId2 { return userId2 + "__" + userId1 - } else { - return userId1 + "__" + userId2 } + return userId1 + "__" + userId2 } func GetGroupDisplayNameFromUsers(users []*User, truncate bool) string { diff --git 
a/vendor/github.com/mattermost/mattermost-server/model/channel_count.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_count.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/model/channel_count.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_count.go index 8c6d8dd..11ddeec 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_count.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_count.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_data.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_data.go similarity index 93% rename from vendor/github.com/mattermost/mattermost-server/model/channel_data.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_data.go index aae0a14..0a1e0d5 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_data.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_data.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_list.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_list.go similarity index 87% rename from vendor/github.com/mattermost/mattermost-server/model/channel_list.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_list.go index b1db60e..4ba9e5c 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_list.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_list.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -11,11 +11,11 @@ import ( type ChannelList []*Channel func (o *ChannelList) ToJson() string { - if b, err := json.Marshal(o); err != nil { + b, err := json.Marshal(o) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func (o *ChannelList) Etag() string { @@ -55,11 +55,11 @@ func ChannelSliceFromJson(data io.Reader) []*Channel { type ChannelListWithTeamData []*ChannelWithTeamData func (o *ChannelListWithTeamData) ToJson() string { - if b, err := json.Marshal(o); err != nil { + b, err := json.Marshal(o) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func (o *ChannelListWithTeamData) Etag() string { diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member.go similarity index 83% rename from vendor/github.com/mattermost/mattermost-server/model/channel_member.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_member.go index 6bfa9bf..567492b 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_member.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -31,6 +31,16 @@ type ChannelUnread struct { NotifyProps StringMap `json:"-"` } +type ChannelUnreadAt struct { + TeamId string `json:"team_id"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + LastViewedAt int64 `json:"last_viewed_at"` + NotifyProps StringMap `json:"-"` +} + type ChannelMember struct { ChannelId string `json:"channel_id"` UserId string `json:"user_id"` @@ -55,11 +65,11 @@ type ChannelMemberForExport struct { } func (o *ChannelMembers) ToJson() string { - if b, err := json.Marshal(o); err != nil { + b, err := json.Marshal(o) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func (o *ChannelUnread) ToJson() string { @@ -67,6 +77,11 @@ func (o *ChannelUnread) ToJson() string { return string(b) } +func (o *ChannelUnreadAt) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + func ChannelMembersFromJson(data io.Reader) *ChannelMembers { var o *ChannelMembers json.NewDecoder(data).Decode(&o) @@ -79,6 +94,12 @@ func ChannelUnreadFromJson(data io.Reader) *ChannelUnread { return o } +func ChannelUnreadAtFromJson(data io.Reader) *ChannelUnreadAt { + var o *ChannelUnreadAt + json.NewDecoder(data).Decode(&o) + return o +} + func (o *ChannelMember) ToJson() string { b, _ := json.Marshal(o) return string(b) @@ -92,11 +113,11 @@ func ChannelMemberFromJson(data io.Reader) *ChannelMember { func (o *ChannelMember) IsValid() *AppError { - if len(o.ChannelId) != 26 { + if !IsValidId(o.ChannelId) { return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.UserId) != 26 { + if !IsValidId(o.UserId) { return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } @@ -143,6 +164,18 @@ func (o *ChannelMember) GetRoles() []string { return strings.Fields(o.Roles) } +func (o *ChannelMember) SetChannelMuted(muted bool) { + if o.IsChannelMuted() { + o.NotifyProps[MARK_UNREAD_NOTIFY_PROP] = CHANNEL_MARK_UNREAD_ALL + } else { + o.NotifyProps[MARK_UNREAD_NOTIFY_PROP] = CHANNEL_MARK_UNREAD_MENTION + } +} + +func (o *ChannelMember) IsChannelMuted() bool { + return o.NotifyProps[MARK_UNREAD_NOTIFY_PROP] == CHANNEL_MARK_UNREAD_MENTION +} + func IsChannelNotifyLevelValid(notifyLevel string) bool { return notifyLevel == CHANNEL_NOTIFY_DEFAULT || notifyLevel == CHANNEL_NOTIFY_ALL || diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history.go similarity index 54% rename from vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history.go index 55435c3..b77e0ff 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
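Several hunks in this patch replace raw `len(id) != 26` checks with the IsValidId helper. A quick sketch of what that helper accepts, again assuming the v5 import path:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	// IsValidId expects a 26-character ID in the alphabet used by model.NewId().
	id := model.NewId()
	fmt.Println(model.IsValidId(id))           // true
	fmt.Println(model.IsValidId("too-short"))  // false: wrong length
	fmt.Println(model.IsValidId(id + "extra")) // false: wrong length
}
```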
package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member_history_result.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go similarity index 53% rename from vendor/github.com/mattermost/mattermost-server/model/channel_member_history_result.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go index 11cb19d..8f43ca4 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_member_history_result.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -10,7 +10,8 @@ type ChannelMemberHistoryResult struct { LeaveTime *int64 // these two fields are never set in the database - when we SELECT, we join on Users to get them - UserEmail string `db:"Email"` - Username string - IsBot bool + UserEmail string `db:"Email"` + Username string + IsBot bool + UserDeleteAt int64 } diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_mentions.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_mentions.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/model/channel_mentions.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_mentions.go index 795ec37..eb14e8e 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_mentions.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_mentions.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go new file mode 100644 index 0000000..87fd3ae --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +const CHANNEL_SEARCH_DEFAULT_LIMIT = 50 + +type ChannelSearch struct { + Term string `json:"term"` + ExcludeDefaultChannels bool `json:"exclude_default_channels"` + NotAssociatedToGroup string `json:"not_associated_to_group"` + TeamIds []string `json:"team_ids"` + GroupConstrained bool `json:"group_constrained"` + ExcludeGroupConstrained bool `json:"exclude_group_constrained"` + Public bool `json:"public"` + Private bool `json:"private"` + IncludeDeleted bool `json:"include_deleted"` + Deleted bool `json:"deleted"` + Page *int `json:"page,omitempty"` + PerPage *int `json:"per_page,omitempty"` +} + +// ToJson convert a Channel to a json string +func (c *ChannelSearch) ToJson() string { + b, _ := json.Marshal(c) + return string(b) +} + +// ChannelSearchFromJson will decode the input and return a Channel +func ChannelSearchFromJson(data io.Reader) *ChannelSearch { + var cs *ChannelSearch + json.NewDecoder(data).Decode(&cs) + return cs +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go new file mode 100644 index 0000000..f1e58eb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go @@ -0,0 +1,125 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "regexp" +) + +type SidebarCategoryType string +type SidebarCategorySorting string + +const ( + // Each sidebar category has a 'type'. System categories are Channels, Favorites and DMs + // All user-created categories will have type Custom + SidebarCategoryChannels SidebarCategoryType = "channels" + SidebarCategoryDirectMessages SidebarCategoryType = "direct_messages" + SidebarCategoryFavorites SidebarCategoryType = "favorites" + SidebarCategoryCustom SidebarCategoryType = "custom" + // Increment to use when adding/reordering things in the sidebar + MinimalSidebarSortDistance = 10 + // Default Sort Orders for categories + DefaultSidebarSortOrderFavorites = 0 + DefaultSidebarSortOrderChannels = DefaultSidebarSortOrderFavorites + MinimalSidebarSortDistance + DefaultSidebarSortOrderDMs = DefaultSidebarSortOrderChannels + MinimalSidebarSortDistance + // Sorting modes + // default for all categories except DMs (behaves like manual) + SidebarCategorySortDefault SidebarCategorySorting = "" + // sort manually + SidebarCategorySortManual SidebarCategorySorting = "manual" + // sort by recency (default for DMs) + SidebarCategorySortRecent SidebarCategorySorting = "recent" + // sort by display name alphabetically + SidebarCategorySortAlphabetical SidebarCategorySorting = "alpha" +) + +// SidebarCategory represents the corresponding DB table +// SortOrder is never returned to the user and only used for queries +type SidebarCategory struct { + Id string `json:"id"` + UserId string `json:"user_id"` + TeamId string `json:"team_id"` + SortOrder int64 `json:"-"` + Sorting SidebarCategorySorting `json:"sorting"` + Type SidebarCategoryType `json:"type"` + DisplayName string `json:"display_name"` + Muted bool `json:"muted"` +} + +// SidebarCategoryWithChannels combines data from SidebarCategory table with the Channel IDs that belong to that category +type SidebarCategoryWithChannels struct { + SidebarCategory + Channels []string `json:"channel_ids"` +} + +type SidebarCategoryOrder []string + +// OrderedSidebarCategories combines 
categories, their channel IDs and an array of Category IDs, sorted +type OrderedSidebarCategories struct { + Categories SidebarCategoriesWithChannels `json:"categories"` + Order SidebarCategoryOrder `json:"order"` +} + +type SidebarChannel struct { + ChannelId string `json:"channel_id"` + UserId string `json:"user_id"` + CategoryId string `json:"category_id"` + SortOrder int64 `json:"-"` +} + +type SidebarChannels []*SidebarChannel +type SidebarCategoriesWithChannels []*SidebarCategoryWithChannels + +func SidebarCategoryFromJson(data io.Reader) (*SidebarCategoryWithChannels, error) { + var o *SidebarCategoryWithChannels + err := json.NewDecoder(data).Decode(&o) + return o, err +} + +func SidebarCategoriesFromJson(data io.Reader) ([]*SidebarCategoryWithChannels, error) { + var o []*SidebarCategoryWithChannels + err := json.NewDecoder(data).Decode(&o) + return o, err +} + +func OrderedSidebarCategoriesFromJson(data io.Reader) (*OrderedSidebarCategories, error) { + var o *OrderedSidebarCategories + err := json.NewDecoder(data).Decode(&o) + return o, err +} + +func (o SidebarCategoryWithChannels) ToJson() []byte { + b, _ := json.Marshal(o) + return b +} + +func SidebarCategoriesWithChannelsToJson(o []*SidebarCategoryWithChannels) []byte { + b, err := json.Marshal(o) + if err != nil { + return []byte("[]") + } + return b +} + +func (o OrderedSidebarCategories) ToJson() []byte { + b, err := json.Marshal(o) + if err != nil { + return []byte("[]") + } + return b +} + +var categoryIdPattern = regexp.MustCompile("(favorites|channels|direct_messages)_[a-z0-9]{26}_[a-z0-9]{26}") + +func IsValidCategoryId(s string) bool { + // Category IDs can either be regular IDs + if IsValidId(s) { + return true + } + + // Or default categories can follow the pattern {type}_{userID}_{teamID} + return categoryIdPattern.MatchString(s) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_stats.go similarity index 61% rename from vendor/github.com/mattermost/mattermost-server/model/channel_stats.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_stats.go index 21af920..76f682a 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_stats.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -9,8 +9,10 @@ import ( ) type ChannelStats struct { - ChannelId string `json:"channel_id"` - MemberCount int64 `json:"member_count"` + ChannelId string `json:"channel_id"` + MemberCount int64 `json:"member_count"` + GuestCount int64 `json:"guest_count"` + PinnedPostCount int64 `json:"pinnedpost_count"` } func (o *ChannelStats) ToJson() string { diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_view.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_view.go similarity index 87% rename from vendor/github.com/mattermost/mattermost-server/model/channel_view.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/channel_view.go index 650d14c..42fcac3 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/channel_view.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_view.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. 
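The new channel_search.go above defines the request body used by the channel search endpoints. A small sketch of building a paged search payload and serializing it; the field values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	// Page and PerPage are pointers, so model.NewInt is used to set them.
	search := &model.ChannelSearch{
		Term:           "town",
		Public:         true,
		IncludeDeleted: true,
		Page:           model.NewInt(0),
		PerPage:        model.NewInt(model.CHANNEL_SEARCH_DEFAULT_LIMIT),
	}
	fmt.Println(search.ToJson())
}
```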
-// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go similarity index 77% rename from vendor/github.com/mattermost/mattermost-server/model/client4.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/client4.go index 1f64135..4d715ac 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/client4.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -10,6 +10,7 @@ import ( "io" "io/ioutil" "mime/multipart" + "net" "net/http" "net/url" "strconv" @@ -30,6 +31,7 @@ const ( HEADER_CSRF_TOKEN = "X-CSRF-Token" HEADER_BEARER = "BEARER" HEADER_AUTH = "Authorization" + HEADER_CLOUD_TOKEN = "X-Cloud-Token" HEADER_REQUESTED_WITH = "X-Requested-With" HEADER_REQUESTED_WITH_XML = "XMLHttpRequest" STATUS = "status" @@ -61,11 +63,44 @@ type Client4 struct { AuthToken string AuthType string HttpHeader map[string]string // Headers to be copied over for each request + + // TrueString is the string value sent to the server for true boolean query parameters. + trueString string + + // FalseString is the string value sent to the server for false boolean query parameters. + falseString string +} + +// SetBoolString is a helper method for overriding how true and false query string parameters are +// sent to the server. +// +// This method is only exposed for testing. It is never necessary to configure these values +// in production. +func (c *Client4) SetBoolString(value bool, valueStr string) { + if value { + c.trueString = valueStr + } else { + c.falseString = valueStr + } +} + +// boolString builds the query string parameter for boolean values. 
+func (c *Client4) boolString(value bool) string { + if value && c.trueString != "" { + return c.trueString + } else if !value && c.falseString != "" { + return c.falseString + } + + if value { + return "true" + } + return "false" } func closeBody(r *http.Response) { if r.Body != nil { - _, _ = ioutil.ReadAll(r.Body) + _, _ = io.Copy(ioutil.Discard, r.Body) _ = r.Body.Close() } } @@ -81,7 +116,21 @@ func (c *Client4) Must(result interface{}, resp *Response) interface{} { } func NewAPIv4Client(url string) *Client4 { - return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}} + url = strings.TrimRight(url, "/") + return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}, "", ""} +} + +func NewAPIv4SocketClient(socketPath string) *Client4 { + tr := &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return net.Dial("unix", socketPath) + }, + } + + client := NewAPIv4Client("http://_") + client.HttpClient = &http.Client{Transport: tr} + + return client } func BuildErrorResponse(r *http.Response, err *AppError) *Response { @@ -112,11 +161,16 @@ func BuildResponse(r *http.Response) *Response { } } -func (c *Client4) MockSession(sessionToken string) { - c.AuthToken = sessionToken +func (c *Client4) SetToken(token string) { + c.AuthToken = token c.AuthType = HEADER_BEARER } +// MockSession is deprecated in favour of SetToken +func (c *Client4) MockSession(token string) { + c.SetToken(token) +} + func (c *Client4) SetOAuthToken(token string) { c.AuthToken = token c.AuthType = HEADER_TOKEN @@ -128,13 +182,25 @@ func (c *Client4) ClearOAuthToken() { } func (c *Client4) GetUsersRoute() string { - return fmt.Sprintf("/users") + return "/users" } func (c *Client4) GetUserRoute(userId string) string { return fmt.Sprintf(c.GetUsersRoute()+"/%v", userId) } +func (c *Client4) GetUserThreadsRoute(userID, teamID string) string { + return c.GetUserRoute(userID) + c.GetTeamRoute(teamID) + "/threads" +} + +func (c *Client4) GetUserThreadRoute(userId, teamId, threadId string) string { + return c.GetUserThreadsRoute(userId, teamId) + "/" + threadId +} + +func (c *Client4) GetUserCategoryRoute(userID, teamID string) string { + return c.GetUserRoute(userID) + c.GetTeamRoute(teamID) + "/channels/categories" +} + func (c *Client4) GetUserAccessTokensRoute() string { return fmt.Sprintf(c.GetUsersRoute() + "/tokens") } @@ -152,7 +218,7 @@ func (c *Client4) GetUserByEmailRoute(email string) string { } func (c *Client4) GetBotsRoute() string { - return fmt.Sprintf("/bots") + return "/bots" } func (c *Client4) GetBotRoute(botUserId string) string { @@ -160,7 +226,7 @@ func (c *Client4) GetBotRoute(botUserId string) string { } func (c *Client4) GetTeamsRoute() string { - return fmt.Sprintf("/teams") + return "/teams" } func (c *Client4) GetTeamRoute(teamId string) string { @@ -192,7 +258,7 @@ func (c *Client4) GetTeamImportRoute(teamId string) string { } func (c *Client4) GetChannelsRoute() string { - return fmt.Sprintf("/channels") + return "/channels" } func (c *Client4) GetChannelsForTeamRoute(teamId string) string { @@ -207,6 +273,15 @@ func (c *Client4) GetChannelByNameRoute(channelName, teamId string) string { return fmt.Sprintf(c.GetTeamRoute(teamId)+"/channels/name/%v", channelName) } +func (c *Client4) GetChannelsForTeamForUserRoute(teamId, userId string, includeDeleted bool) string { + route := fmt.Sprintf(c.GetUserRoute(userId) + c.GetTeamRoute(teamId) + "/channels") + if includeDeleted { + query := fmt.Sprintf("?include_deleted=%v", 
includeDeleted) + return route + query + } + return route +} + func (c *Client4) GetChannelByNameForTeamNameRoute(channelName, teamName string) string { return fmt.Sprintf(c.GetTeamByNameRoute(teamName)+"/channels/name/%v", channelName) } @@ -220,19 +295,19 @@ func (c *Client4) GetChannelMemberRoute(channelId, userId string) string { } func (c *Client4) GetPostsRoute() string { - return fmt.Sprintf("/posts") + return "/posts" } func (c *Client4) GetPostsEphemeralRoute() string { - return fmt.Sprintf("/posts/ephemeral") + return "/posts/ephemeral" } func (c *Client4) GetConfigRoute() string { - return fmt.Sprintf("/config") + return "/config" } func (c *Client4) GetLicenseRoute() string { - return fmt.Sprintf("/license") + return "/license" } func (c *Client4) GetPostRoute(postId string) string { @@ -240,15 +315,23 @@ func (c *Client4) GetPostRoute(postId string) string { } func (c *Client4) GetFilesRoute() string { - return fmt.Sprintf("/files") + return "/files" } func (c *Client4) GetFileRoute(fileId string) string { return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId) } +func (c *Client4) GetUploadsRoute() string { + return "/uploads" +} + +func (c *Client4) GetUploadRoute(uploadId string) string { + return fmt.Sprintf("%s/%s", c.GetUploadsRoute(), uploadId) +} + func (c *Client4) GetPluginsRoute() string { - return fmt.Sprintf("/plugins") + return "/plugins" } func (c *Client4) GetPluginRoute(pluginId string) string { @@ -256,31 +339,39 @@ func (c *Client4) GetPluginRoute(pluginId string) string { } func (c *Client4) GetSystemRoute() string { - return fmt.Sprintf("/system") + return "/system" +} + +func (c *Client4) GetCloudRoute() string { + return "/cloud" } func (c *Client4) GetTestEmailRoute() string { - return fmt.Sprintf("/email/test") + return "/email/test" +} + +func (c *Client4) GetTestSiteURLRoute() string { + return "/site_url/test" } func (c *Client4) GetTestS3Route() string { - return fmt.Sprintf("/file/s3_test") + return "/file/s3_test" } func (c *Client4) GetDatabaseRoute() string { - return fmt.Sprintf("/database") + return "/database" } func (c *Client4) GetCacheRoute() string { - return fmt.Sprintf("/caches") + return "/caches" } func (c *Client4) GetClusterRoute() string { - return fmt.Sprintf("/cluster") + return "/cluster" } func (c *Client4) GetIncomingWebhooksRoute() string { - return fmt.Sprintf("/hooks/incoming") + return "/hooks/incoming" } func (c *Client4) GetIncomingWebhookRoute(hookID string) string { @@ -288,7 +379,7 @@ func (c *Client4) GetIncomingWebhookRoute(hookID string) string { } func (c *Client4) GetComplianceReportsRoute() string { - return fmt.Sprintf("/compliance/reports") + return "/compliance/reports" } func (c *Client4) GetComplianceReportRoute(reportId string) string { @@ -296,7 +387,7 @@ func (c *Client4) GetComplianceReportRoute(reportId string) string { } func (c *Client4) GetOutgoingWebhooksRoute() string { - return fmt.Sprintf("/hooks/outgoing") + return "/hooks/outgoing" } func (c *Client4) GetOutgoingWebhookRoute(hookID string) string { @@ -316,35 +407,43 @@ func (c *Client4) GetUserStatusesRoute() string { } func (c *Client4) GetSamlRoute() string { - return fmt.Sprintf("/saml") + return "/saml" } func (c *Client4) GetLdapRoute() string { - return fmt.Sprintf("/ldap") + return "/ldap" } func (c *Client4) GetBrandRoute() string { - return fmt.Sprintf("/brand") + return "/brand" } func (c *Client4) GetDataRetentionRoute() string { - return fmt.Sprintf("/data_retention") + return "/data_retention" } func (c *Client4) 
GetElasticsearchRoute() string { - return fmt.Sprintf("/elasticsearch") + return "/elasticsearch" +} + +func (c *Client4) GetBleveRoute() string { + return "/bleve" } func (c *Client4) GetCommandsRoute() string { - return fmt.Sprintf("/commands") + return "/commands" } func (c *Client4) GetCommandRoute(commandId string) string { return fmt.Sprintf(c.GetCommandsRoute()+"/%v", commandId) } +func (c *Client4) GetCommandMoveRoute(commandId string) string { + return fmt.Sprintf(c.GetCommandsRoute()+"/%v/move", commandId) +} + func (c *Client4) GetEmojisRoute() string { - return fmt.Sprintf("/emoji") + return "/emoji" } func (c *Client4) GetEmojiRoute(emojiId string) string { @@ -356,11 +455,11 @@ func (c *Client4) GetEmojiByNameRoute(name string) string { } func (c *Client4) GetReactionsRoute() string { - return fmt.Sprintf("/reactions") + return "/reactions" } func (c *Client4) GetOAuthAppsRoute() string { - return fmt.Sprintf("/oauth/apps") + return "/oauth/apps" } func (c *Client4) GetOAuthAppRoute(appId string) string { @@ -368,19 +467,19 @@ func (c *Client4) GetOAuthAppRoute(appId string) string { } func (c *Client4) GetOpenGraphRoute() string { - return fmt.Sprintf("/opengraph") + return "/opengraph" } func (c *Client4) GetJobsRoute() string { - return fmt.Sprintf("/jobs") + return "/jobs" } func (c *Client4) GetRolesRoute() string { - return fmt.Sprintf("/roles") + return "/roles" } func (c *Client4) GetSchemesRoute() string { - return fmt.Sprintf("/schemes") + return "/schemes" } func (c *Client4) GetSchemeRoute(id string) string { @@ -388,7 +487,7 @@ func (c *Client4) GetSchemeRoute(id string) string { } func (c *Client4) GetAnalyticsRoute() string { - return fmt.Sprintf("/analytics") + return "/analytics" } func (c *Client4) GetTimezonesRoute() string { @@ -408,7 +507,11 @@ func (c *Client4) GetTotalUsersStatsRoute() string { } func (c *Client4) GetRedirectLocationRoute() string { - return fmt.Sprintf("/redirect_location") + return "/redirect_location" +} + +func (c *Client4) GetServerBusyRoute() string { + return "/server_busy" } func (c *Client4) GetUserTermsOfServiceRoute(userId string) string { @@ -423,6 +526,10 @@ func (c *Client4) GetGroupsRoute() string { return "/groups" } +func (c *Client4) GetPublishUserTypingRoute(userId string) string { + return c.GetUserRoute(userId) + "/typing" +} + func (c *Client4) GetGroupRoute(groupID string) string { return fmt.Sprintf("%s/%s", c.GetGroupsRoute(), groupID) } @@ -435,6 +542,10 @@ func (c *Client4) GetGroupSyncablesRoute(groupID string, syncableType GroupSynca return fmt.Sprintf("%s/%ss", c.GetGroupRoute(groupID), strings.ToLower(syncableType.String())) } +func (c *Client4) GetImportsRoute() string { + return "/imports" +} + func (c *Client4) DoApiGet(url string, etag string) (*http.Response, *AppError) { return c.DoApiRequest(http.MethodGet, c.ApiUrl+url, "", etag) } @@ -620,7 +731,7 @@ func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response m := make(map[string]string) m["login_id"] = loginId m["password"] = password - m["ldap_only"] = "true" + m["ldap_only"] = c.boolString(true) return c.login(m) } @@ -871,6 +982,17 @@ func (c *Client4) GetRecentlyActiveUsersInTeam(teamId string, page int, perPage return UserListFromJson(r.Body), BuildResponse(r) } +// GetActiveUsersInTeam returns a page of users on a team. Page counting starts at 0. 
+func (c *Client4) GetActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) { + query := fmt.Sprintf("?active=true&in_team=%v&page=%v&per_page=%v", teamId, page, perPage) + r, err := c.DoApiGet(c.GetUsersRoute()+query, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserListFromJson(r.Body), BuildResponse(r) +} + // GetUsersNotInTeam returns a page of users who are not in a team. Page counting starts at 0. func (c *Client4) GetUsersNotInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) { query := fmt.Sprintf("?not_in_team=%v&page=%v&per_page=%v", teamId, page, perPage) @@ -926,6 +1048,17 @@ func (c *Client4) GetUsersWithoutTeam(page int, perPage int, etag string) ([]*Us return UserListFromJson(r.Body), BuildResponse(r) } +// GetUsersInGroup returns a page of users in a group. Page counting starts at 0. +func (c *Client4) GetUsersInGroup(groupID string, page int, perPage int, etag string) ([]*User, *Response) { + query := fmt.Sprintf("?in_group=%v&page=%v&per_page=%v", groupID, page, perPage) + r, err := c.DoApiGet(c.GetUsersRoute()+query, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserListFromJson(r.Body), BuildResponse(r) +} + // GetUsersByIds returns a list of users based on the provided user ids. func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response) { r, err := c.DoApiPost(c.GetUsersRoute()+"/ids", ArrayToJson(userIds)) @@ -1078,6 +1211,37 @@ func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string return CheckStatusOK(r), BuildResponse(r) } +// UpdateUserHashedPassword updates a user's password with an already-hashed password. Must be a system administrator. +func (c *Client4) UpdateUserHashedPassword(userId, newHashedPassword string) (bool, *Response) { + requestBody := map[string]string{"already_hashed": "true", "new_password": newHashedPassword} + r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// PromoteGuestToUser convert a guest into a regular user +func (c *Client4) PromoteGuestToUser(guestId string) (bool, *Response) { + r, err := c.DoApiPost(c.GetUserRoute(guestId)+"/promote", "") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// DemoteUserToGuest convert a regular user into a guest +func (c *Client4) DemoteUserToGuest(guestId string) (bool, *Response) { + r, err := c.DoApiPost(c.GetUserRoute(guestId)+"/demote", "") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // UpdateUserRoles updates a user's roles in the system. A user can have "system_user" and "system_admin" roles. func (c *Client4) UpdateUserRoles(userId, roles string) (bool, *Response) { requestBody := map[string]string{"roles": roles} @@ -1112,6 +1276,50 @@ func (c *Client4) DeleteUser(userId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// PermanentDeleteUser deletes a user in the system based on the provided user id string. 
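A usage sketch for the client additions above (SetToken and GetActiveUsersInTeam). The server URL, token, and team ID are placeholders, not values from this patch, and a reachable server is assumed:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("https://mattermost.example.com")
	client.SetToken("personal-access-token")

	// Page counting starts at 0; the empty string is the etag.
	users, resp := client.GetActiveUsersInTeam("team-id", 0, 60, "")
	if resp.Error != nil {
		fmt.Println("request failed:", resp.Error.Error())
		return
	}
	for _, u := range users {
		fmt.Println(u.Username)
	}
}
```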
+func (c *Client4) PermanentDeleteUser(userId string) (bool, *Response) { + r, err := c.DoApiDelete(c.GetUserRoute(userId) + "?permanent=" + c.boolString(true)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// ConvertUserToBot converts a user to a bot user. +func (c *Client4) ConvertUserToBot(userId string) (*Bot, *Response) { + r, err := c.DoApiPost(c.GetUserRoute(userId)+"/convert_to_bot", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return BotFromJson(r.Body), BuildResponse(r) +} + +// ConvertBotToUser converts a bot user to a user. +func (c *Client4) ConvertBotToUser(userId string, userPatch *UserPatch, setSystemAdmin bool) (*User, *Response) { + var query string + if setSystemAdmin { + query = "?set_system_admin=true" + } + r, err := c.DoApiPost(c.GetBotRoute(userId)+"/convert_to_user"+query, userPatch.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) +} + +// PermanentDeleteAll permanently deletes all users in the system. This is a local only endpoint +func (c *Client4) PermanentDeleteAllUsers() (bool, *Response) { + r, err := c.DoApiDelete(c.GetUsersRoute()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // SendPasswordResetEmail will send a link for password resetting to a user with the // provided email. func (c *Client4) SendPasswordResetEmail(email string) (bool, *Response) { @@ -1226,6 +1434,16 @@ func (c *Client4) VerifyUserEmail(token string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// VerifyUserEmailWithoutToken will verify a user's email by its Id. (Requires manage system role) +func (c *Client4) VerifyUserEmailWithoutToken(userId string) (*User, *Response) { + r, err := c.DoApiPost(c.GetUserRoute(userId)+"/email/verify/member", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) +} + // SendVerificationEmail will send an email to the user with the provided email address, if // that user exists. The email will contain a link that can be used to verify the user's // email address. @@ -1426,7 +1644,7 @@ func (c *Client4) GetBot(userId string, etag string) (*Bot, *Response) { // GetBot fetches the given bot, even if it is deleted. func (c *Client4) GetBotIncludeDeleted(userId string, etag string) (*Bot, *Response) { - r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted=true", etag) + r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted="+c.boolString(true), etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -1447,7 +1665,7 @@ func (c *Client4) GetBots(page, perPage int, etag string) ([]*Bot, *Response) { // GetBotsIncludeDeleted fetches the given page of bots, including deleted. 
func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetBotsRoute()+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -1458,7 +1676,7 @@ func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, // GetBotsOrphaned fetches the given page of bots, only including orphanded bots. func (c *Client4) GetBotsOrphaned(page, perPage int, etag string) ([]*Bot, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetBotsRoute()+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -1598,7 +1816,7 @@ func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Res // GetAllTeamsWithTotalCount returns all teams based on permissions. func (c *Client4) GetAllTeamsWithTotalCount(etag string, page int, perPage int) ([]*Team, int64, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag) if err != nil { return nil, 0, BuildErrorResponse(r, err) @@ -1628,6 +1846,23 @@ func (c *Client4) SearchTeams(search *TeamSearch) ([]*Team, *Response) { return TeamListFromJson(r.Body), BuildResponse(r) } +// SearchTeamsPaged returns a page of teams and the total count matching the provided search term. +func (c *Client4) SearchTeamsPaged(search *TeamSearch) ([]*Team, int64, *Response) { + if search.Page == nil { + search.Page = NewInt(0) + } + if search.PerPage == nil { + search.PerPage = NewInt(100) + } + r, err := c.DoApiPost(c.GetTeamsRoute()+"/search", search.ToJson()) + if err != nil { + return nil, 0, BuildErrorResponse(r, err) + } + defer closeBody(r) + twc := TeamsWithCountFromJson(r.Body) + return twc.Teams, twc.TotalCount, BuildResponse(r) +} + // TeamExists returns true or false if the team exist or not. func (c *Client4) TeamExists(name, etag string) (bool, *Response) { r, err := c.DoApiGet(c.GetTeamByNameRoute(name)+"/exists", etag) @@ -1700,6 +1935,16 @@ func (c *Client4) PatchTeam(teamId string, patch *TeamPatch) (*Team, *Response) return TeamFromJson(r.Body), BuildResponse(r) } +// RestoreTeam restores a previously deleted team. +func (c *Client4) RestoreTeam(teamId string) (*Team, *Response) { + r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/restore", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) +} + // RegenerateTeamInviteId requests a new invite ID to be generated. func (c *Client4) RegenerateTeamInviteId(teamId string) (*Team, *Response) { r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/regenerate_invite_id", "") @@ -1723,7 +1968,7 @@ func (c *Client4) SoftDeleteTeam(teamId string) (bool, *Response) { // PermanentDeleteTeam deletes the team, should only be used when needed for // compliance and the like. 
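SearchTeamsPaged, added above, fills in Page 0 and PerPage 100 when those fields are left nil. A hedged sketch of calling it; the URL and token are placeholders and the caller needs permission to search teams:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client := model.NewAPIv4Client("https://mattermost.example.com")
	client.SetToken("admin-token")

	// Page/PerPage omitted, so the defaults described above apply.
	teams, total, resp := client.SearchTeamsPaged(&model.TeamSearch{Term: "eng"})
	if resp.Error != nil {
		fmt.Println("search failed:", resp.Error.Error())
		return
	}
	fmt.Printf("%d of %d matching teams on this page\n", len(teams), total)
}
```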
func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) { - r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=true") + r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=" + c.boolString(true)) if err != nil { return false, BuildErrorResponse(r, err) } @@ -1731,6 +1976,18 @@ func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// UpdateTeamPrivacy modifies the team type (model.TEAM_OPEN <--> model.TEAM_INVITE) and sets +// the corresponding AllowOpenInvite appropriately. +func (c *Client4) UpdateTeamPrivacy(teamId string, privacy string) (*Team, *Response) { + requestBody := map[string]string{"privacy": privacy} + r, err := c.DoApiPut(c.GetTeamRoute(teamId)+"/privacy", MapToJson(requestBody)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) +} + // GetTeamMembers returns team members based on the provided team id string. func (c *Client4) GetTeamMembers(teamId string, page int, perPage int, etag string) ([]*TeamMember, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) @@ -1742,6 +1999,18 @@ func (c *Client4) GetTeamMembers(teamId string, page int, perPage int, etag stri return TeamMembersFromJson(r.Body), BuildResponse(r) } +// GetTeamMembersWithoutDeletedUsers returns team members based on the provided team id string. Additional parameters of sort and exclude_deleted_users accepted as well +// Could not add it to above function due to it be a breaking change. +func (c *Client4) GetTeamMembersSortAndWithoutDeletedUsers(teamId string, page int, perPage int, sort string, exclude_deleted_users bool, etag string) ([]*TeamMember, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v&exclude_deleted_users=%v", page, perPage, sort, exclude_deleted_users) + r, err := c.DoApiGet(c.GetTeamMembersRoute(teamId)+query, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return TeamMembersFromJson(r.Body), BuildResponse(r) +} + // GetTeamMembersForUser returns the team members for a user. func (c *Client4) GetTeamMembersForUser(userId string, etag string) ([]*TeamMember, *Response) { r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/members", etag) @@ -1811,6 +2080,22 @@ func (c *Client4) AddTeamMembers(teamId string, userIds []string) ([]*TeamMember return TeamMembersFromJson(r.Body), BuildResponse(r) } +// AddTeamMembers adds a number of users to a team and returns the team members. +func (c *Client4) AddTeamMembersGracefully(teamId string, userIds []string) ([]*TeamMemberWithError, *Response) { + var members []*TeamMember + for _, userId := range userIds { + member := &TeamMember{TeamId: teamId, UserId: userId} + members = append(members, member) + } + + r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch?graceful="+c.boolString(true), TeamMembersToJson(members)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return TeamMembersWithErrorFromJson(r.Body), BuildResponse(r) +} + // RemoveTeamMember will remove a user from a team. func (c *Client4) RemoveTeamMember(teamId, userId string) (bool, *Response) { r, err := c.DoApiDelete(c.GetTeamMemberRoute(teamId, userId)) @@ -1904,6 +2189,46 @@ func (c *Client4) InviteUsersToTeam(teamId string, userEmails []string) (bool, * return CheckStatusOK(r), BuildResponse(r) } +// InviteGuestsToTeam invite guest by email to some channels in a team. 
+func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channels []string, message string) (bool, *Response) { + guestsInvite := GuestsInvite{ + Emails: userEmails, + Channels: channels, + Message: message, + } + r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email", guestsInvite.ToJson()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// InviteUsersToTeam invite users by email to the team. +func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response) { + r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJson(userEmails)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return EmailInviteWithErrorFromJson(r.Body), BuildResponse(r) +} + +// InviteGuestsToTeam invite guest by email to some channels in a team. +func (c *Client4) InviteGuestsToTeamGracefully(teamId string, userEmails []string, channels []string, message string) ([]*EmailInviteWithError, *Response) { + guestsInvite := GuestsInvite{ + Emails: userEmails, + Channels: channels, + Message: message, + } + r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email?graceful="+c.boolString(true), guestsInvite.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return EmailInviteWithErrorFromJson(r.Body), BuildResponse(r) +} + // InvalidateEmailInvites will invalidate active email invitations that have not been accepted by the user. func (c *Client4) InvalidateEmailInvites() (bool, *Response) { r, err := c.DoApiDelete(c.GetTeamsRoute() + "/invites/email") @@ -1995,7 +2320,16 @@ func (c *Client4) RemoveTeamIcon(teamId string) (bool, *Response) { // GetAllChannels get all the channels. Must be a system administrator. func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + return c.getAllChannels(page, perPage, etag, false) +} + +// GetAllChannelsIncludeDeleted get all the channels. Must be a system administrator. +func (c *Client4) GetAllChannelsIncludeDeleted(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) { + return c.getAllChannels(page, perPage, etag, true) +} + +func (c *Client4) getAllChannels(page int, perPage int, etag string, includeDeleted bool) (*ChannelListWithTeamData, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=%v", page, perPage, includeDeleted) r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -2006,7 +2340,7 @@ func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelLi // GetAllChannelsWithCount get all the channels including the total count. Must be a system administrator. 
func (c *Client4) GetAllChannelsWithCount(page int, perPage int, etag string) (*ChannelListWithTeamData, int64, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count=true", page, perPage) + query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage) r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag) if err != nil { return nil, 0, BuildErrorResponse(r, err) @@ -2056,6 +2390,17 @@ func (c *Client4) ConvertChannelToPrivate(channelId string) (*Channel, *Response return ChannelFromJson(r.Body), BuildResponse(r) } +// UpdateChannelPrivacy updates channel privacy +func (c *Client4) UpdateChannelPrivacy(channelId string, privacy string) (*Channel, *Response) { + requestBody := map[string]string{"privacy": privacy} + r, err := c.DoApiPut(c.GetChannelRoute(channelId)+"/privacy", MapToJson(requestBody)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) +} + // RestoreChannel restores a previously deleted channel. Any missing fields are not updated. func (c *Client4) RestoreChannel(channelId string) (*Channel, *Response) { r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/restore", "") @@ -2128,6 +2473,17 @@ func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Res return PostListFromJson(r.Body), BuildResponse(r) } +// GetPrivateChannelsForTeam returns a list of private channels based on the provided team id string. +func (c *Client4) GetPrivateChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) { + query := fmt.Sprintf("/private?page=%v&per_page=%v", page, perPage) + r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) +} + // GetPublicChannelsForTeam returns a list of public channels based on the provided team id string. func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) @@ -2161,8 +2517,20 @@ func (c *Client4) GetPublicChannelsByIdsForTeam(teamId string, channelIds []stri } // GetChannelsForTeamForUser returns a list channels of on a team for a user. -func (c *Client4) GetChannelsForTeamForUser(teamId, userId, etag string) ([]*Channel, *Response) { - r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetTeamRoute(teamId)+"/channels", etag) +func (c *Client4) GetChannelsForTeamForUser(teamId, userId string, includeDeleted bool, etag string) ([]*Channel, *Response) { + r, err := c.DoApiGet(c.GetChannelsForTeamForUserRoute(teamId, userId, includeDeleted), etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) +} + +// GetChannelsForTeamAndUserWithLastDeleteAt returns a list channels of a team for a user, additionally filtered with lastDeleteAt. This does not have any effect if includeDeleted is set to false. 
+func (c *Client4) GetChannelsForTeamAndUserWithLastDeleteAt(teamId, userId string, includeDeleted bool, lastDeleteAt int, etag string) ([]*Channel, *Response) { + route := fmt.Sprintf(c.GetUserRoute(userId) + c.GetTeamRoute(teamId) + "/channels") + route += fmt.Sprintf("?include_deleted=%v&last_delete_at=%d", includeDeleted, lastDeleteAt) + r, err := c.DoApiGet(route, etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2180,6 +2548,16 @@ func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Chann return ChannelSliceFromJson(r.Body), BuildResponse(r) } +// SearchArchivedChannels returns the archived channels on a team matching the provided search term. +func (c *Client4) SearchArchivedChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) { + r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search_archived", search.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) +} + // SearchAllChannels search in all the channels. Must be a system administrator. func (c *Client4) SearchAllChannels(search *ChannelSearch) (*ChannelListWithTeamData, *Response) { r, err := c.DoApiPost(c.GetChannelsRoute()+"/search", search.ToJson()) @@ -2190,6 +2568,16 @@ func (c *Client4) SearchAllChannels(search *ChannelSearch) (*ChannelListWithTeam return ChannelListWithTeamDataFromJson(r.Body), BuildResponse(r) } +// SearchAllChannelsPaged searches all the channels and returns the results paged with the total count. +func (c *Client4) SearchAllChannelsPaged(search *ChannelSearch) (*ChannelsWithCount, *Response) { + r, err := c.DoApiPost(c.GetChannelsRoute()+"/search", search.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelsWithCountFromJson(r.Body), BuildResponse(r) +} + // SearchGroupChannels returns the group channels of the user whose members' usernames match the search term. func (c *Client4) SearchGroupChannels(search *ChannelSearch) ([]*Channel, *Response) { r, err := c.DoApiPost(c.GetChannelsRoute()+"/group/search", search.ToJson()) @@ -2210,6 +2598,30 @@ func (c *Client4) DeleteChannel(channelId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// PermanentDeleteChannel deletes a channel based on the provided channel id string. +func (c *Client4) PermanentDeleteChannel(channelId string) (bool, *Response) { + r, err := c.DoApiDelete(c.GetChannelRoute(channelId) + "?permanent=" + c.boolString(true)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// MoveChannel moves the channel to the destination team. +func (c *Client4) MoveChannel(channelId, teamId string, force bool) (*Channel, *Response) { + requestBody := map[string]interface{}{ + "team_id": teamId, + "force": force, + } + r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/move", StringInterfaceToJson(requestBody)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) +} + // GetChannelByName returns a channel based on the provided channel name and team id strings. 
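In the v5 client GetChannelsForTeamForUser takes an explicit includeDeleted flag, so callers migrating from the older two-string signature must pass it. A short sketch, assuming the same package, imports, and authenticated client as the invite example above; ids are placeholders.

// channelsForUser lists a user's channels on a team, optionally including
// soft-deleted ones.
func channelsForUser(client *model.Client4) {
	channels, resp := client.GetChannelsForTeamForUser("placeholder-team-id", "placeholder-user-id", true, "")
	if resp.Error != nil {
		fmt.Println("listing channels failed:", resp.Error.Error())
		return
	}
	for _, ch := range channels {
		fmt.Println(ch.DisplayName)
	}
}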
func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response) { r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId), etag) @@ -2222,7 +2634,7 @@ func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Ch // GetChannelByNameIncludeDeleted returns a channel based on the provided channel name and team id strings. Other then GetChannelByName it will also return deleted channels. func (c *Client4) GetChannelByNameIncludeDeleted(channelName, teamId string, etag string) (*Channel, *Response) { - r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted=true", etag) + r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted="+c.boolString(true), etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2242,7 +2654,7 @@ func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag // GetChannelByNameForTeamNameIncludeDeleted returns a channel based on the provided channel name and team name strings. Other then GetChannelByNameForTeamName it will also return deleted channels. func (c *Client4) GetChannelByNameForTeamNameIncludeDeleted(channelName, teamName string, etag string) (*Channel, *Response) { - r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted=true", etag) + r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted="+c.boolString(true), etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2440,6 +2852,16 @@ func (c *Client4) PatchPost(postId string, patch *PostPatch) (*Post, *Response) return PostFromJson(r.Body), BuildResponse(r) } +// SetPostUnread marks channel where post belongs as unread on the time of the provided post. +func (c *Client4) SetPostUnread(userId string, postId string) *Response { + r, err := c.DoApiPost(c.GetUserRoute(userId)+c.GetPostRoute(postId)+"/set_unread", "") + if err != nil { + return BuildErrorResponse(r, err) + } + defer closeBody(r) + return BuildResponse(r) +} + // PinPost pin a post based on provided post id string. func (c *Client4) PinPost(postId string) (bool, *Response) { r, err := c.DoApiPost(c.GetPostRoute(postId)+"/pin", "") @@ -2481,8 +2903,12 @@ func (c *Client4) DeletePost(postId string) (bool, *Response) { } // GetPostThread gets a post with all the other posts in the same thread. -func (c *Client4) GetPostThread(postId string, etag string) (*PostList, *Response) { - r, err := c.DoApiGet(c.GetPostRoute(postId)+"/thread", etag) +func (c *Client4) GetPostThread(postId string, etag string, collapsedThreads bool) (*PostList, *Response) { + url := c.GetPostRoute(postId) + "/thread" + if collapsedThreads { + url += "?collapsedThreads=true" + } + r, err := c.DoApiGet(url, etag) if err != nil { return nil, BuildErrorResponse(r, err) } @@ -2491,8 +2917,11 @@ func (c *Client4) GetPostThread(postId string, etag string) (*PostList, *Respons } // GetPostsForChannel gets a page of posts with an array for ordering for a channel. 
-func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string) (*PostList, *Response) { +func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if collapsedThreads { + query += "&collapsedThreads=true" + } r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -2514,7 +2943,7 @@ func (c *Client4) GetFlaggedPostsForUser(userId string, page int, perPage int) ( // GetFlaggedPostsForUserInTeam returns flagged posts in team of a user based on user id string. func (c *Client4) GetFlaggedPostsForUserInTeam(userId string, teamId string, page int, perPage int) (*PostList, *Response) { - if len(teamId) == 0 || len(teamId) != 26 { + if !IsValidId(teamId) { return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInTeam", "model.client.get_flagged_posts_in_team.missing_parameter.app_error", nil, "", http.StatusBadRequest)} } @@ -2529,7 +2958,7 @@ func (c *Client4) GetFlaggedPostsForUserInTeam(userId string, teamId string, pag // GetFlaggedPostsForUserInChannel returns flagged posts in channel of a user based on user id string. func (c *Client4) GetFlaggedPostsForUserInChannel(userId string, channelId string, page int, perPage int) (*PostList, *Response) { - if len(channelId) == 0 || len(channelId) != 26 { + if !IsValidId(channelId) { return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInChannel", "model.client.get_flagged_posts_in_channel.missing_parameter.app_error", nil, "", http.StatusBadRequest)} } @@ -2543,8 +2972,11 @@ func (c *Client4) GetFlaggedPostsForUserInChannel(userId string, channelId strin } // GetPostsSince gets posts created after a specified time as Unix time in milliseconds. -func (c *Client4) GetPostsSince(channelId string, time int64) (*PostList, *Response) { +func (c *Client4) GetPostsSince(channelId string, time int64, collapsedThreads bool) (*PostList, *Response) { query := fmt.Sprintf("?since=%v", time) + if collapsedThreads { + query += "&collapsedThreads=true" + } r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, "") if err != nil { return nil, BuildErrorResponse(r, err) @@ -2554,8 +2986,11 @@ func (c *Client4) GetPostsSince(channelId string, time int64) (*PostList, *Respo } // GetPostsAfter gets a page of posts that were posted after the post provided. -func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) { +func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v&after=%v", page, perPage, postId) + if collapsedThreads { + query += "&collapsedThreads=true" + } r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -2565,8 +3000,11 @@ func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, eta } // GetPostsBefore gets a page of posts that were posted before the post provided. 
-func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) { +func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v&before=%v", page, perPage, postId) + if collapsedThreads { + query += "&collapsedThreads=true" + } r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag) if err != nil { return nil, BuildErrorResponse(r, err) @@ -2576,14 +3014,17 @@ func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, et } // GetPostsAroundLastUnread gets a list of posts around last unread post by a user in a channel. -func (c *Client4) GetPostsAroundLastUnread(userId, channelId string, limitBefore, limitAfter int) (*PostList, *Response) { +func (c *Client4) GetPostsAroundLastUnread(userId, channelId string, limitBefore, limitAfter int, collapsedThreads bool) (*PostList, *Response) { query := fmt.Sprintf("?limit_before=%v&limit_after=%v", limitBefore, limitAfter) - if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetChannelRoute(channelId)+"/posts/unread"+query, ""); err != nil { + if collapsedThreads { + query += "&collapsedThreads=true" + } + r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetChannelRoute(channelId)+"/posts/unread"+query, "") + if err != nil { return nil, BuildErrorResponse(r, err) - } else { - defer closeBody(r) - return PostListFromJson(r.Body), BuildResponse(r) } + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) } // SearchPosts returns any posts with matching terms string. @@ -2848,9 +3289,9 @@ func (c *Client4) GetPing() (string, *Response) { } // GetPingWithServerStatus will return ok if several basic server health checks -// all psss successfully. +// all pass successfully. func (c *Client4) GetPingWithServerStatus() (string, *Response) { - r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status=true", "") + r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status="+c.boolString(true), "") if r != nil && r.StatusCode == 500 { defer r.Body.Close() return STATUS_UNHEALTHY, BuildErrorResponse(r, err) @@ -2862,6 +3303,21 @@ func (c *Client4) GetPingWithServerStatus() (string, *Response) { return MapFromJson(r.Body)["status"], BuildResponse(r) } +// GetPingWithFullServerStatus will return the full status if several basic server +// health checks all pass successfully. +func (c *Client4) GetPingWithFullServerStatus() (map[string]string, *Response) { + r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status="+c.boolString(true), "") + if r != nil && r.StatusCode == 500 { + defer r.Body.Close() + return map[string]string{"status": STATUS_UNHEALTHY}, BuildErrorResponse(r, err) + } + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return MapFromJson(r.Body), BuildResponse(r) +} + // TestEmail will attempt to connect to the configured SMTP server. func (c *Client4) TestEmail(config *Config) (bool, *Response) { r, err := c.DoApiPost(c.GetTestEmailRoute(), config.ToJson()) @@ -2872,6 +3328,18 @@ func (c *Client4) TestEmail(config *Config) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// TestSiteURL will test the validity of a site URL. 
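The post-fetching methods above gained a trailing collapsedThreads flag; when it is true the client simply appends collapsedThreads=true to the query string. A sketch under the same setup assumptions; ids are placeholders.

// recentPosts fetches a page of channel posts and one post's thread with
// collapsed-threads metadata requested.
func recentPosts(client *model.Client4) {
	posts, resp := client.GetPostsForChannel("placeholder-channel-id", 0, 30, "", true)
	if resp.Error != nil {
		fmt.Println("fetching posts failed:", resp.Error.Error())
		return
	}
	fmt.Println("posts in page:", len(posts.Order))

	thread, resp := client.GetPostThread("placeholder-post-id", "", true)
	if resp.Error != nil {
		fmt.Println("fetching thread failed:", resp.Error.Error())
		return
	}
	fmt.Println("posts in thread:", len(thread.Order))
}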
+func (c *Client4) TestSiteURL(siteURL string) (bool, *Response) { + requestBody := make(map[string]string) + requestBody["site_url"] = siteURL + r, err := c.DoApiPost(c.GetTestSiteURLRoute(), MapToJson(requestBody)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // TestS3Connection will attempt to connect to the AWS S3. func (c *Client4) TestS3Connection(config *Config) (bool, *Response) { r, err := c.DoApiPost(c.GetTestS3Route(), config.ToJson()) @@ -2966,6 +3434,19 @@ func (c *Client4) UpdateConfig(config *Config) (*Config, *Response) { return ConfigFromJson(r.Body), BuildResponse(r) } +// MigrateConfig will migrate existing config to the new one. +func (c *Client4) MigrateConfig(from, to string) (bool, *Response) { + m := make(map[string]string, 2) + m["from"] = from + m["to"] = to + r, err := c.DoApiPost(c.GetConfigRoute()+"/migrate", MapToJson(m)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return true, BuildResponse(r) +} + // UploadLicenseFile will add a license file to the system. func (c *Client4) UploadLicenseFile(data []byte) (bool, *Response) { body := &bytes.Buffer{} @@ -3249,7 +3730,7 @@ func (c *Client4) GetSamlMetadata() (string, *Response) { return buf.String(), BuildResponse(r) } -func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { +func fileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { body := &bytes.Buffer{} writer := multipart.NewWriter(body) @@ -3272,7 +3753,7 @@ func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Write // UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it. // The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, *Response) { - body, writer, err := samlFileToMultipart(data, filename) + body, writer, err := fileToMultipart(data, filename) if err != nil { return false, &Response{Error: NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} } @@ -3284,7 +3765,7 @@ func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, // UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it. // The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (bool, *Response) { - body, writer, err := samlFileToMultipart(data, filename) + body, writer, err := fileToMultipart(data, filename) if err != nil { return false, &Response{Error: NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} } @@ -3296,7 +3777,7 @@ func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (boo // UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it. // The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk. 
func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (bool, *Response) { - body, writer, err := samlFileToMultipart(data, filename) + body, writer, err := fileToMultipart(data, filename) if err != nil { return false, &Response{Error: NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} } @@ -3345,6 +3826,18 @@ func (c *Client4) GetSamlCertificateStatus() (*SamlCertificateStatus, *Response) return SamlCertificateStatusFromJson(r.Body), BuildResponse(r) } +func (c *Client4) GetSamlMetadataFromIdp(samlMetadataURL string) (*SamlMetadataResponse, *Response) { + requestBody := make(map[string]string) + requestBody["saml_metadata_url"] = samlMetadataURL + r, err := c.DoApiPost(c.GetSamlRoute()+"/metadatafromidp", MapToJson(requestBody)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + + defer closeBody(r) + return SamlMetadataResponseFromJson(r.Body), BuildResponse(r) +} + // Compliance Section // CreateComplianceReport creates an incoming webhook for a channel. @@ -3452,7 +3945,19 @@ func (c *Client4) GetLdapGroups() ([]*Group, *Response) { } defer closeBody(r) - return GroupsFromJson(r.Body), BuildResponse(r) + responseData := struct { + Count int `json:"count"` + Groups []*Group `json:"groups"` + }{} + if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { + appErr := NewAppError("Api4.GetLdapGroups", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + return nil, BuildErrorResponse(r, appErr) + } + for i := range responseData.Groups { + responseData.Groups[i].DisplayName = *responseData.Groups[i].Name + } + + return responseData.Groups, BuildResponse(r) } // LinkLdapGroup creates or undeletes a Mattermost group and associates it to the given LDAP group DN. 
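GetLdapGroups now decodes a {count, groups} payload and copies each group's LDAP name into DisplayName before returning the slice, so callers only see the group list. A sketch under the same setup assumptions.

// listLdapGroups prints the display names of the LDAP groups known to the server.
func listLdapGroups(client *model.Client4) {
	groups, resp := client.GetLdapGroups()
	if resp.Error != nil {
		fmt.Println("fetching LDAP groups failed:", resp.Error.Error())
		return
	}
	for _, group := range groups {
		fmt.Println(group.DisplayName)
	}
}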
@@ -3481,9 +3986,21 @@ func (c *Client4) UnlinkLdapGroup(dn string) (*Group, *Response) { return GroupFromJson(r.Body), BuildResponse(r) } +// MigrateIdLdap migrates the LDAP enabled users to given attribute +func (c *Client4) MigrateIdLdap(toAttribute string) (bool, *Response) { + r, err := c.DoApiPost(c.GetLdapRoute()+"/migrateid", MapToJson(map[string]string{ + "toAttribute": toAttribute, + })) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // GetGroupsByChannel retrieves the Mattermost Groups associated with a given channel -func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([]*Group, int, *Response) { - path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v", c.GetChannelRoute(channelId), opts.Q, opts.IncludeMemberCount) +func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response) { + path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetChannelRoute(channelId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference) if opts.PageOpts != nil { path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage) } @@ -3494,8 +4011,8 @@ func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([] defer closeBody(r) responseData := struct { - Groups []*Group `json:"groups"` - Count int `json:"total_group_count"` + Groups []*GroupWithSchemeAdmin `json:"groups"` + Count int `json:"total_group_count"` }{} if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { appErr := NewAppError("Api4.GetGroupsByChannel", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) @@ -3506,8 +4023,8 @@ func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([] } // GetGroupsByTeam retrieves the Mattermost Groups associated with a given team -func (c *Client4) GetGroupsByTeam(teamId string, opts GroupSearchOpts) ([]*Group, int, *Response) { - path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v", c.GetTeamRoute(teamId), opts.Q, opts.IncludeMemberCount) +func (c *Client4) GetGroupsByTeam(teamId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response) { + path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetTeamRoute(teamId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference) if opts.PageOpts != nil { path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage) } @@ -3518,8 +4035,8 @@ func (c *Client4) GetGroupsByTeam(teamId string, opts GroupSearchOpts) ([]*Group defer closeBody(r) responseData := struct { - Groups []*Group `json:"groups"` - Count int `json:"total_group_count"` + Groups []*GroupWithSchemeAdmin `json:"groups"` + Count int `json:"total_group_count"` }{} if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { appErr := NewAppError("Api4.GetGroupsByTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) @@ -3529,16 +4046,44 @@ func (c *Client4) GetGroupsByTeam(teamId string, opts GroupSearchOpts) ([]*Group return responseData.Groups, responseData.Count, BuildResponse(r) } +// GetGroupsAssociatedToChannelsByTeam retrieves the Mattermost Groups associated with channels in a given team +func (c *Client4) GetGroupsAssociatedToChannelsByTeam(teamId string, opts GroupSearchOpts) (map[string][]*GroupWithSchemeAdmin, *Response) { + path := 
fmt.Sprintf("%s/groups_by_channels?q=%v&filter_allow_reference=%v", c.GetTeamRoute(teamId), opts.Q, opts.FilterAllowReference) + if opts.PageOpts != nil { + path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage) + } + r, appErr := c.DoApiGet(path, "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + responseData := struct { + GroupsAssociatedToChannels map[string][]*GroupWithSchemeAdmin `json:"groups"` + }{} + if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil { + appErr := NewAppError("Api4.GetGroupsAssociatedToChannelsByTeam", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + return nil, BuildErrorResponse(r, appErr) + } + + return responseData.GroupsAssociatedToChannels, BuildResponse(r) +} + // GetGroups retrieves Mattermost Groups func (c *Client4) GetGroups(opts GroupSearchOpts) ([]*Group, *Response) { path := fmt.Sprintf( - "%s?include_member_count=%v¬_associated_to_team=%v¬_associated_to_channel=%v&q=%v", + "%s?include_member_count=%v¬_associated_to_team=%v¬_associated_to_channel=%v&filter_allow_reference=%v&q=%v&filter_parent_team_permitted=%v", c.GetGroupsRoute(), opts.IncludeMemberCount, opts.NotAssociatedToTeam, opts.NotAssociatedToChannel, + opts.FilterAllowReference, opts.Q, + opts.FilterParentTeamPermitted, ) + if opts.Since > 0 { + path = fmt.Sprintf("%s&since=%v", path, opts.Since) + } if opts.PageOpts != nil { path = fmt.Sprintf("%s&page=%v&per_page=%v", path, opts.PageOpts.Page, opts.PageOpts.PerPage) } @@ -3551,6 +4096,90 @@ func (c *Client4) GetGroups(opts GroupSearchOpts) ([]*Group, *Response) { return GroupsFromJson(r.Body), BuildResponse(r) } +// GetGroupsByUserId retrieves Mattermost Groups for a user +func (c *Client4) GetGroupsByUserId(userId string) ([]*Group, *Response) { + path := fmt.Sprintf( + "%s/%v/groups", + c.GetUsersRoute(), + userId, + ) + + r, appErr := c.DoApiGet(path, "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + return GroupsFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) MigrateAuthToLdap(fromAuthService string, matchField string, force bool) (bool, *Response) { + r, err := c.DoApiPost(c.GetUsersRoute()+"/migrate_auth/ldap", StringInterfaceToJson(map[string]interface{}{ + "from": fromAuthService, + "force": force, + "match_field": matchField, + })) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +func (c *Client4) MigrateAuthToSaml(fromAuthService string, usersMap map[string]string, auto bool) (bool, *Response) { + r, err := c.DoApiPost(c.GetUsersRoute()+"/migrate_auth/saml", StringInterfaceToJson(map[string]interface{}{ + "from": fromAuthService, + "auto": auto, + "matches": usersMap, + })) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// UploadLdapPublicCertificate will upload a public certificate for LDAP and set the config to use it. 
+func (c *Client4) UploadLdapPublicCertificate(data []byte) (bool, *Response) { + body, writer, err := fileToMultipart(data, LDAP_PUBLIC_CERTIFICATE_NAME) + if err != nil { + return false, &Response{Error: NewAppError("UploadLdapPublicCertificate", "model.client.upload_ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetLdapRoute()+"/certificate/public", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// UploadLdapPrivateCertificate will upload a private key for LDAP and set the config to use it. +func (c *Client4) UploadLdapPrivateCertificate(data []byte) (bool, *Response) { + body, writer, err := fileToMultipart(data, LDAP_PRIVATE_KEY_NAME) + if err != nil { + return false, &Response{Error: NewAppError("UploadLdapPrivateCertificate", "model.client.upload_Ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetLdapRoute()+"/certificate/private", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// DeleteLdapPublicCertificate deletes the LDAP IDP certificate from the server and updates the config to not use it and disable LDAP. +func (c *Client4) DeleteLdapPublicCertificate() (bool, *Response) { + r, err := c.DoApiDelete(c.GetLdapRoute() + "/certificate/public") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// DeleteLDAPPrivateCertificate deletes the LDAP IDP certificate from the server and updates the config to not use it and disable LDAP. +func (c *Client4) DeleteLdapPrivateCertificate() (bool, *Response) { + r, err := c.DoApiDelete(c.GetLdapRoute() + "/certificate/private") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // Audits Section // GetAudits returns a list of audits for the whole system. @@ -3814,6 +4443,18 @@ func (c *Client4) PurgeElasticsearchIndexes() (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// Bleve Section + +// PurgeBleveIndexes immediately deletes all Bleve indexes. +func (c *Client4) PurgeBleveIndexes() (bool, *Response) { + r, err := c.DoApiPost(c.GetBleveRoute()+"/purge_indexes", "") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // Data Retention Section // GetDataRetentionPolicy will get the current server data retention policy details. @@ -3848,6 +4489,17 @@ func (c *Client4) UpdateCommand(cmd *Command) (*Command, *Response) { return CommandFromJson(r.Body), BuildResponse(r) } +// MoveCommand moves a command to a different team. +func (c *Client4) MoveCommand(teamId string, commandId string) (bool, *Response) { + cmr := CommandMoveRequest{TeamId: teamId} + r, err := c.DoApiPut(c.GetCommandMoveRoute(commandId), cmr.ToJson()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + // DeleteCommand deletes a command based on the provided command id string. func (c *Client4) DeleteCommand(commandId string) (bool, *Response) { r, err := c.DoApiDelete(c.GetCommandRoute(commandId)) @@ -3869,6 +4521,28 @@ func (c *Client4) ListCommands(teamId string, customOnly bool) ([]*Command, *Res return CommandListFromJson(r.Body), BuildResponse(r) } +// ListCommandAutocompleteSuggestions will retrieve a list of suggestions for a userInput. 
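The LDAP certificate endpoints above take the raw file bytes and build the multipart request via fileToMultipart. A sketch that reads two local PEM files (placeholder paths) and uploads them; it additionally assumes an io/ioutil import.

// installLdapCertificates uploads a public certificate and private key for LDAP.
func installLdapCertificates(client *model.Client4) error {
	pub, err := ioutil.ReadFile("ldap-public.crt") // placeholder path
	if err != nil {
		return err
	}
	if _, resp := client.UploadLdapPublicCertificate(pub); resp.Error != nil {
		return resp.Error
	}

	key, err := ioutil.ReadFile("ldap-private.key") // placeholder path
	if err != nil {
		return err
	}
	if _, resp := client.UploadLdapPrivateCertificate(key); resp.Error != nil {
		return resp.Error
	}
	return nil
}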
+func (c *Client4) ListCommandAutocompleteSuggestions(userInput, teamId string) ([]AutocompleteSuggestion, *Response) { + query := fmt.Sprintf("/commands/autocomplete_suggestions?user_input=%v", userInput) + r, err := c.DoApiGet(c.GetTeamRoute(teamId)+query, "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return AutocompleteSuggestionsFromJSON(r.Body), BuildResponse(r) +} + +// GetCommandById will retrieve a command by id. +func (c *Client4) GetCommandById(cmdId string) (*Command, *Response) { + url := fmt.Sprintf("%s/%s", c.GetCommandsRoute(), cmdId) + r, err := c.DoApiGet(url, "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CommandFromJson(r.Body), BuildResponse(r) +} + // ExecuteCommand executes a given slash command. func (c *Client4) ExecuteCommand(channelId, command string) (*CommandResponse, *Response) { commandArgs := &CommandArgs{ @@ -4197,6 +4871,21 @@ func (c *Client4) CancelJob(jobId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// DownloadJob downloads the results of the job +func (c *Client4) DownloadJob(jobId string) ([]byte, *Response) { + r, appErr := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/%v/download", jobId), "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetFile", "model.client.read_job_result_file.app_error", nil, err.Error(), r.StatusCode)) + } + return data, BuildResponse(r) +} + // Roles Section // GetRole gets a single role by ID. @@ -4328,7 +5017,7 @@ func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response writer := multipart.NewWriter(body) if force { - err := writer.WriteField("force", "true") + err := writer.WriteField("force", c.boolString(true)) if err != nil { return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} } @@ -4371,10 +5060,7 @@ func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response } func (c *Client4) InstallPluginFromUrl(downloadUrl string, force bool) (*Manifest, *Response) { - forceStr := "false" - if force { - forceStr = "true" - } + forceStr := c.boolString(force) url := fmt.Sprintf("%s?plugin_download_url=%s&force=%s", c.GetPluginsRoute()+"/install_from_url", url.QueryEscape(downloadUrl), forceStr) r, err := c.DoApiPost(url, "") @@ -4385,6 +5071,21 @@ func (c *Client4) InstallPluginFromUrl(downloadUrl string, force bool) (*Manifes return ManifestFromJson(r.Body), BuildResponse(r) } +// InstallMarketplacePlugin will install marketplace plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) InstallMarketplacePlugin(request *InstallMarketplacePluginRequest) (*Manifest, *Response) { + json, err := request.ToJson() + if err != nil { + return nil, &Response{Error: NewAppError("InstallMarketplacePlugin", "model.client.plugin_request_to_json.app_error", nil, err.Error(), http.StatusBadRequest)} + } + r, appErr := c.DoApiPost(c.GetPluginsRoute()+"/marketplace", json) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + return ManifestFromJson(r.Body), BuildResponse(r) +} + // GetPlugins will return a list of plugin manifests for currently active plugins. // WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. 
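ListCommandAutocompleteSuggestions fetches suggestions for a partially typed slash command on a team. A sketch under the same setup assumptions; the user input and team id are placeholders, and the AutocompleteSuggestion.Suggestion field name is assumed from the v5 model.

// suggestSlashCommands asks the server for autocomplete suggestions for a
// partially typed slash command.
func suggestSlashCommands(client *model.Client4) {
	suggestions, resp := client.ListCommandAutocompleteSuggestions("/jir", "placeholder-team-id")
	if resp.Error != nil {
		fmt.Println("autocomplete failed:", resp.Error.Error())
		return
	}
	for _, s := range suggestions {
		fmt.Println(s.Suggestion) // field name assumed from the v5 model
	}
}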
func (c *Client4) GetPlugins() (*PluginsResponse, *Response) { @@ -4400,7 +5101,7 @@ func (c *Client4) GetPlugins() (*PluginsResponse, *Response) { // to the administrator via the system console. // WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. func (c *Client4) GetPluginStatuses() (PluginStatuses, *Response) { - r, err := c.DoApiGet(c.GetPluginsRoute(), "/statuses") + r, err := c.DoApiGet(c.GetPluginsRoute()+"/statuses", "") if err != nil { return nil, BuildErrorResponse(r, err) } @@ -4452,6 +5153,31 @@ func (c *Client4) DisablePlugin(id string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// GetMarketplacePlugins will return a list of plugins that an admin can install. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) GetMarketplacePlugins(filter *MarketplacePluginFilter) ([]*MarketplacePlugin, *Response) { + route := c.GetPluginsRoute() + "/marketplace" + u, parseErr := url.Parse(route) + if parseErr != nil { + return nil, &Response{Error: NewAppError("GetMarketplacePlugins", "model.client.parse_plugins.app_error", nil, parseErr.Error(), http.StatusBadRequest)} + } + + filter.ApplyToURL(u) + + r, err := c.DoApiGet(u.String(), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + + plugins, readerErr := MarketplacePluginsFromReader(r.Body) + if readerErr != nil { + return nil, BuildErrorResponse(r, NewAppError(route, "model.client.parse_plugins.app_error", nil, err.Error(), http.StatusBadRequest)) + } + + return plugins, BuildResponse(r) +} + // UpdateChannelScheme will update a channel's scheme. func (c *Client4) UpdateChannelScheme(channelId, schemeId string) (bool, *Response) { sip := &SchemeIDPatch{SchemeID: &schemeId} @@ -4485,6 +5211,56 @@ func (c *Client4) GetRedirectLocation(urlParam, etag string) (string, *Response) return MapFromJson(r.Body)["location"], BuildResponse(r) } +// SetServerBusy will mark the server as busy, which disables non-critical services for `secs` seconds. +func (c *Client4) SetServerBusy(secs int) (bool, *Response) { + url := fmt.Sprintf("%s?seconds=%d", c.GetServerBusyRoute(), secs) + r, err := c.DoApiPost(url, "") + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// ClearServerBusy will mark the server as not busy. +func (c *Client4) ClearServerBusy() (bool, *Response) { + r, err := c.DoApiDelete(c.GetServerBusyRoute()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// GetServerBusy returns the current ServerBusyState including the time when a server marked busy +// will automatically have the flag cleared. +func (c *Client4) GetServerBusy() (*ServerBusyState, *Response) { + r, err := c.DoApiGet(c.GetServerBusyRoute(), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + + sbs := ServerBusyStateFromJson(r.Body) + return sbs, BuildResponse(r) +} + +// GetServerBusyExpires returns the time when a server marked busy +// will automatically have the flag cleared. +// +// Deprecated: Use GetServerBusy instead. 
+func (c *Client4) GetServerBusyExpires() (*time.Time, *Response) { + r, err := c.DoApiGet(c.GetServerBusyRoute(), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + + sbs := ServerBusyStateFromJson(r.Body) + expires := time.Unix(sbs.Expires, 0) + return &expires, BuildResponse(r) +} + // RegisterTermsOfServiceAction saves action performed by a user against a specific terms of service. func (c *Client4) RegisterTermsOfServiceAction(userId, termsOfServiceId string, accepted bool) (*bool, *Response) { url := c.GetUserTermsOfServiceRoute(userId) @@ -4622,3 +5398,460 @@ func (c *Client4) ChannelMembersMinusGroupMembers(channelID string, groupIDs []s ugc := UsersWithGroupsAndCountFromJson(r.Body) return ugc.Users, ugc.Count, BuildResponse(r) } + +func (c *Client4) PatchConfig(config *Config) (*Config, *Response) { + r, err := c.DoApiPut(c.GetConfigRoute()+"/patch", config.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ConfigFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetChannelModerations(channelID string, etag string) ([]*ChannelModeration, *Response) { + r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/moderations", etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelModerationsFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) PatchChannelModerations(channelID string, patch []*ChannelModerationPatch) ([]*ChannelModeration, *Response) { + payload, _ := json.Marshal(patch) + r, err := c.DoApiPut(c.GetChannelRoute(channelID)+"/moderations/patch", string(payload)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelModerationsFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetKnownUsers() ([]string, *Response) { + r, err := c.DoApiGet(c.GetUsersRoute()+"/known", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + var userIds []string + json.NewDecoder(r.Body).Decode(&userIds) + return userIds, BuildResponse(r) +} + +// PublishUserTyping publishes a user is typing websocket event based on the provided TypingRequest. 
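SetServerBusy, GetServerBusy, and ClearServerBusy manage the server-busy flag, with GetServerBusyExpires kept only for backward compatibility. A sketch that flags the server busy for an hour and reads back the expiry; it additionally assumes a time import, and the one-hour window is an arbitrary placeholder.

// markBusyForUpgrade flags the server busy for an hour, reads back the
// expiry time, then clears the flag again.
func markBusyForUpgrade(client *model.Client4) {
	if _, resp := client.SetServerBusy(3600); resp.Error != nil {
		fmt.Println("setting busy state failed:", resp.Error.Error())
		return
	}
	state, resp := client.GetServerBusy()
	if resp.Error != nil {
		fmt.Println("reading busy state failed:", resp.Error.Error())
		return
	}
	fmt.Println("busy until:", time.Unix(state.Expires, 0))

	if _, resp := client.ClearServerBusy(); resp.Error != nil {
		fmt.Println("clearing busy state failed:", resp.Error.Error())
	}
}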
+func (c *Client4) PublishUserTyping(userID string, typingRequest TypingRequest) (bool, *Response) { + r, err := c.DoApiPost(c.GetPublishUserTypingRoute(userID), typingRequest.ToJson()) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +func (c *Client4) GetChannelMemberCountsByGroup(channelID string, includeTimezones bool, etag string) ([]*ChannelMemberCountByGroup, *Response) { + r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/member_counts_by_group?include_timezones="+strconv.FormatBool(includeTimezones), etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelMemberCountsByGroupFromJson(r.Body), BuildResponse(r) +} + +// RequestTrialLicense will request a trial license and install it in the server +func (c *Client4) RequestTrialLicense(users int) (bool, *Response) { + b, _ := json.Marshal(map[string]int{"users": users}) + r, err := c.DoApiPost("/trial-license", string(b)) + if err != nil { + return false, BuildErrorResponse(r, err) + } + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) +} + +// GetGroupStats retrieves stats for a Mattermost Group +func (c *Client4) GetGroupStats(groupID string) (*GroupStats, *Response) { + r, appErr := c.DoApiGet(c.GetGroupRoute(groupID)+"/stats", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + return GroupStatsFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetSidebarCategoriesForTeamForUser(userID, teamID, etag string) (*OrderedSidebarCategories, *Response) { + route := c.GetUserCategoryRoute(userID, teamID) + r, appErr := c.DoApiGet(route, etag) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + cat, err := OrderedSidebarCategoriesFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.GetSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + return cat, BuildResponse(r) +} + +func (c *Client4) CreateSidebarCategoryForTeamForUser(userID, teamID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) { + payload, _ := json.Marshal(category) + route := c.GetUserCategoryRoute(userID, teamID) + r, appErr := c.doApiPostBytes(route, payload) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + cat, err := SidebarCategoryFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.CreateSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + return cat, BuildResponse(r) +} + +func (c *Client4) UpdateSidebarCategoriesForTeamForUser(userID, teamID string, categories []*SidebarCategoryWithChannels) ([]*SidebarCategoryWithChannels, *Response) { + payload, _ := json.Marshal(categories) + route := c.GetUserCategoryRoute(userID, teamID) + + r, appErr := c.doApiPutBytes(route, payload) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + categories, err := SidebarCategoriesFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + + return categories, BuildResponse(r) +} + +func (c *Client4) GetSidebarCategoryOrderForTeamForUser(userID, teamID, etag string) ([]string, *Response) { + route := 
c.GetUserCategoryRoute(userID, teamID) + "/order" + r, err := c.DoApiGet(route, etag) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ArrayFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) UpdateSidebarCategoryOrderForTeamForUser(userID, teamID string, order []string) ([]string, *Response) { + payload, _ := json.Marshal(order) + route := c.GetUserCategoryRoute(userID, teamID) + "/order" + r, err := c.doApiPutBytes(route, payload) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ArrayFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetSidebarCategoryForTeamForUser(userID, teamID, categoryID, etag string) (*SidebarCategoryWithChannels, *Response) { + route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID + r, appErr := c.DoApiGet(route, etag) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + cat, err := SidebarCategoryFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + + return cat, BuildResponse(r) +} + +func (c *Client4) UpdateSidebarCategoryForTeamForUser(userID, teamID, categoryID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) { + payload, _ := json.Marshal(category) + route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID + r, appErr := c.doApiPutBytes(route, payload) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + cat, err := SidebarCategoryFromJson(r.Body) + if err != nil { + return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode)) + } + + return cat, BuildResponse(r) +} + +// CheckIntegrity performs a database integrity check. +func (c *Client4) CheckIntegrity() ([]IntegrityCheckResult, *Response) { + r, err := c.DoApiPost("/integrity", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + var results []IntegrityCheckResult + if err := json.NewDecoder(r.Body).Decode(&results); err != nil { + appErr := NewAppError("Api4.CheckIntegrity", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError) + return nil, BuildErrorResponse(r, appErr) + } + return results, BuildResponse(r) +} + +func (c *Client4) GetNotices(lastViewed int64, teamId string, client NoticeClientType, clientVersion, locale, etag string) (NoticeMessages, *Response) { + url := fmt.Sprintf("/system/notices/%s?lastViewed=%d&client=%s&clientVersion=%s&locale=%s", teamId, lastViewed, client, clientVersion, locale) + r, appErr := c.DoApiGet(url, etag) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + notices, err := UnmarshalProductNoticeMessages(r.Body) + if err != nil { + return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} + } + return notices, BuildResponse(r) +} + +func (c *Client4) MarkNoticesViewed(ids []string) *Response { + r, err := c.DoApiPut("/system/notices/view", ArrayToJson(ids)) + if err != nil { + return BuildErrorResponse(r, err) + } + defer closeBody(r) + return BuildResponse(r) +} + +// CreateUpload creates a new upload session. 
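The sidebar category order endpoints get and put the ordered list of category ids for a user on a team. A sketch that moves the last category to the top; ids are placeholders and the same setup assumptions apply.

// promoteLastCategory moves the user's last sidebar category to the top of the list.
func promoteLastCategory(client *model.Client4) {
	order, resp := client.GetSidebarCategoryOrderForTeamForUser("placeholder-user-id", "placeholder-team-id", "")
	if resp.Error != nil || len(order) < 2 {
		return
	}
	last := order[len(order)-1]
	newOrder := append([]string{last}, order[:len(order)-1]...)
	if _, resp := client.UpdateSidebarCategoryOrderForTeamForUser("placeholder-user-id", "placeholder-team-id", newOrder); resp.Error != nil {
		fmt.Println("reordering categories failed:", resp.Error.Error())
	}
}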
+func (c *Client4) CreateUpload(us *UploadSession) (*UploadSession, *Response) { + r, err := c.DoApiPost(c.GetUploadsRoute(), us.ToJson()) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UploadSessionFromJson(r.Body), BuildResponse(r) +} + +// GetUpload returns the upload session for the specified uploadId. +func (c *Client4) GetUpload(uploadId string) (*UploadSession, *Response) { + r, err := c.DoApiGet(c.GetUploadRoute(uploadId), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UploadSessionFromJson(r.Body), BuildResponse(r) +} + +// GetUploadsForUser returns the upload sessions created by the specified +// userId. +func (c *Client4) GetUploadsForUser(userId string) ([]*UploadSession, *Response) { + r, err := c.DoApiGet(c.GetUserRoute(userId)+"/uploads", "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return UploadSessionsFromJson(r.Body), BuildResponse(r) +} + +// UploadData performs an upload. On success it returns +// a FileInfo object. +func (c *Client4) UploadData(uploadId string, data io.Reader) (*FileInfo, *Response) { + url := c.GetUploadRoute(uploadId) + r, err := c.doApiRequestReader("POST", c.ApiUrl+url, data, "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return FileInfoFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) UpdatePassword(userId, currentPassword, newPassword string) *Response { + requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword} + r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody)) + if err != nil { + return BuildErrorResponse(r, err) + } + defer closeBody(r) + return BuildResponse(r) +} + +// Cloud Section + +func (c *Client4) GetCloudProducts() ([]*Product, *Response) { + r, appErr := c.DoApiGet(c.GetCloudRoute()+"/products", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var cloudProducts []*Product + json.NewDecoder(r.Body).Decode(&cloudProducts) + + return cloudProducts, BuildResponse(r) +} + +func (c *Client4) CreateCustomerPayment() (*StripeSetupIntent, *Response) { + r, appErr := c.DoApiPost(c.GetCloudRoute()+"/payment", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var setupIntent *StripeSetupIntent + json.NewDecoder(r.Body).Decode(&setupIntent) + + return setupIntent, BuildResponse(r) +} + +func (c *Client4) ConfirmCustomerPayment(confirmRequest *ConfirmPaymentMethodRequest) *Response { + json, _ := json.Marshal(confirmRequest) + + r, appErr := c.doApiPostBytes(c.GetCloudRoute()+"/payment/confirm", json) + if appErr != nil { + return BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + return BuildResponse(r) +} + +func (c *Client4) GetCloudCustomer() (*CloudCustomer, *Response) { + r, appErr := c.DoApiGet(c.GetCloudRoute()+"/customer", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var cloudCustomer *CloudCustomer + json.NewDecoder(r.Body).Decode(&cloudCustomer) + + return cloudCustomer, BuildResponse(r) +} + +func (c *Client4) GetSubscription() (*Subscription, *Response) { + r, appErr := c.DoApiGet(c.GetCloudRoute()+"/subscription", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var subscription *Subscription + json.NewDecoder(r.Body).Decode(&subscription) + + return subscription, 
BuildResponse(r) +} + +func (c *Client4) GetInvoicesForSubscription() ([]*Invoice, *Response) { + r, appErr := c.DoApiGet(c.GetCloudRoute()+"/subscription/invoices", "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var invoices []*Invoice + json.NewDecoder(r.Body).Decode(&invoices) + + return invoices, BuildResponse(r) +} + +func (c *Client4) UpdateCloudCustomer(customerInfo *CloudCustomerInfo) (*CloudCustomer, *Response) { + customerBytes, _ := json.Marshal(customerInfo) + + r, appErr := c.doApiPutBytes(c.GetCloudRoute()+"/customer", customerBytes) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var customer *CloudCustomer + json.NewDecoder(r.Body).Decode(&customer) + + return customer, BuildResponse(r) +} + +func (c *Client4) UpdateCloudCustomerAddress(address *Address) (*CloudCustomer, *Response) { + addressBytes, _ := json.Marshal(address) + + r, appErr := c.doApiPutBytes(c.GetCloudRoute()+"/customer/address", addressBytes) + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var customer *CloudCustomer + json.NewDecoder(r.Body).Decode(&customer) + + return customer, BuildResponse(r) +} + +func (c *Client4) ListImports() ([]string, *Response) { + r, err := c.DoApiGet(c.GetImportsRoute(), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ArrayFromJson(r.Body), BuildResponse(r) +} + +func (c *Client4) GetUserThreads(userId, teamId string, options GetUserThreadsOpts) (*Threads, *Response) { + v := url.Values{} + if options.Since != 0 { + v.Set("since", fmt.Sprintf("%d", options.Since)) + } + if options.Page != 0 { + v.Set("page", fmt.Sprintf("%d", options.Page)) + } + if options.PageSize != 0 { + v.Set("pageSize", fmt.Sprintf("%d", options.PageSize)) + } + if options.Extended { + v.Set("extended", "true") + } + if options.Deleted { + v.Set("deleted", "true") + } + + url := c.GetUserThreadsRoute(userId, teamId) + if len(v) > 0 { + url += "?" 
+ v.Encode() + } + + r, appErr := c.DoApiGet(url, "") + if appErr != nil { + return nil, BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + var threads Threads + json.NewDecoder(r.Body).Decode(&threads) + + return &threads, BuildResponse(r) +} + +func (c *Client4) UpdateThreadsReadForUser(userId, teamId string) *Response { + r, appErr := c.DoApiPut(fmt.Sprintf("%s/read", c.GetUserThreadsRoute(userId, teamId)), "") + if appErr != nil { + return BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + return BuildResponse(r) +} + +func (c *Client4) UpdateThreadReadForUser(userId, teamId, threadId string, timestamp int64) *Response { + r, appErr := c.DoApiPut(fmt.Sprintf("%s/read/%d", c.GetUserThreadRoute(userId, teamId, threadId), timestamp), "") + if appErr != nil { + return BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + return BuildResponse(r) +} + +func (c *Client4) UpdateThreadFollowForUser(userId, teamId, threadId string, state bool) *Response { + var appErr *AppError + var r *http.Response + if state { + r, appErr = c.DoApiPut(c.GetUserThreadRoute(userId, teamId, threadId)+"/following", "") + } else { + r, appErr = c.DoApiDelete(c.GetUserThreadRoute(userId, teamId, threadId) + "/following") + } + if appErr != nil { + return BuildErrorResponse(r, appErr) + } + defer closeBody(r) + + return BuildResponse(r) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cloud.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cloud.go new file mode 100644 index 0000000..2140289 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cloud.go @@ -0,0 +1,130 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +const ( + EventTypeFailedPayment = "failed-payment" + EventTypeFailedPaymentNoCard = "failed-payment-no-card" +) + +// Product model represents a product on the cloud system. +type Product struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + PricePerSeat float64 `json:"price_per_seat"` + AddOns []*AddOn `json:"add_ons"` +} + +// AddOn represents an addon to a product. +type AddOn struct { + ID string `json:"id"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + PricePerSeat float64 `json:"price_per_seat"` +} + +// StripeSetupIntent represents the SetupIntent model from Stripe for updating payment methods. +type StripeSetupIntent struct { + ID string `json:"id"` + ClientSecret string `json:"client_secret"` +} + +// ConfirmPaymentMethodRequest contains the fields for the customer payment update API. +type ConfirmPaymentMethodRequest struct { + StripeSetupIntentID string `json:"stripe_setup_intent_id"` +} + +// Customer model represents a customer on the system. +type CloudCustomer struct { + CloudCustomerInfo + ID string `json:"id"` + CreatorID string `json:"creator_id"` + CreateAt int64 `json:"create_at"` + BillingAddress *Address `json:"billing_address"` + CompanyAddress *Address `json:"company_address"` + PaymentMethod *PaymentMethod `json:"payment_method"` +} + +// CloudCustomerInfo represents editable info of a customer. +type CloudCustomerInfo struct { + Name string `json:"name"` + Email string `json:"email,omitempty"` + ContactFirstName string `json:"contact_first_name,omitempty"` + ContactLastName string `json:"contact_last_name,omitempty"` + NumEmployees int `json:"num_employees"` +} + +// Address model represents a customer's address. 
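GetUserThreads builds its query string from the GetUserThreadsOpts fields (Since, Page, PageSize, Extended, Deleted), and UpdateThreadsReadForUser marks every thread on the team as read. A sketch under the same setup assumptions; ids are placeholders and the returned Threads value is not inspected here.

// catchUpOnThreads fetches a page of followed threads and then marks all of
// the user's threads on the team as read.
func catchUpOnThreads(client *model.Client4) {
	opts := model.GetUserThreadsOpts{PageSize: 30, Extended: true}
	threads, resp := client.GetUserThreads("placeholder-user-id", "placeholder-team-id", opts)
	if resp.Error != nil {
		fmt.Println("fetching threads failed:", resp.Error.Error())
		return
	}
	_ = threads // inspect the returned Threads value as needed

	if resp := client.UpdateThreadsReadForUser("placeholder-user-id", "placeholder-team-id"); resp.Error != nil {
		fmt.Println("marking threads read failed:", resp.Error.Error())
	}
}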
+type Address struct { + City string `json:"city"` + Country string `json:"country"` + Line1 string `json:"line1"` + Line2 string `json:"line2"` + PostalCode string `json:"postal_code"` + State string `json:"state"` +} + +// PaymentMethod represents methods of payment for a customer. +type PaymentMethod struct { + Type string `json:"type"` + LastFour int `json:"last_four"` + ExpMonth int `json:"exp_month"` + ExpYear int `json:"exp_year"` + CardBrand string `json:"card_brand"` + Name string `json:"name"` +} + +// Subscription model represents a subscription on the system. +type Subscription struct { + ID string `json:"id"` + CustomerID string `json:"customer_id"` + ProductID string `json:"product_id"` + AddOns []string `json:"add_ons"` + StartAt int64 `json:"start_at"` + EndAt int64 `json:"end_at"` + CreateAt int64 `json:"create_at"` + Seats int `json:"seats"` + Status string `json:"status"` + DNS string `json:"dns"` + IsPaidTier string `json:"is_paid_tier"` + LastInvoice *Invoice `json:"last_invoice"` +} + +// Invoice model represents a cloud invoice +type Invoice struct { + ID string `json:"id"` + Number string `json:"number"` + CreateAt int64 `json:"create_at"` + Total int64 `json:"total"` + Tax int64 `json:"tax"` + Status string `json:"status"` + Description string `json:"description"` + PeriodStart int64 `json:"period_start"` + PeriodEnd int64 `json:"period_end"` + SubscriptionID string `json:"subscription_id"` + Items []*InvoiceLineItem `json:"line_items"` +} + +// InvoiceLineItem model represents a cloud invoice lineitem tied to an invoice. +type InvoiceLineItem struct { + PriceID string `json:"price_id"` + Total int64 `json:"total"` + Quantity int64 `json:"quantity"` + PricePerUnit int64 `json:"price_per_unit"` + Description string `json:"description"` + Type string `json:"type"` + Metadata map[string]interface{} `json:"metadata"` +} + +type CWSWebhookPayload struct { + Event string `json:"event"` + FailedPayment *FailedPayment `json:"failed_payment"` +} + +type FailedPayment struct { + CardBrand string `json:"card_brand"` + LastFour int `json:"last_four"` + FailureMessage string `json:"failure_message"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_discovery.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/cluster_discovery.go index 9fbd84f..f6c9275 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_discovery.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -89,7 +89,7 @@ func FilterClusterDiscovery(vs []*ClusterDiscovery, f func(*ClusterDiscovery) bo } func (o *ClusterDiscovery) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("ClusterDiscovery.IsValid", "model.cluster.is_valid.id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_info.go similarity index 78% rename from vendor/github.com/mattermost/mattermost-server/model/cluster_info.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/cluster_info.go index 46a3487..fc15d38 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_info.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -16,15 +16,15 @@ type ClusterInfo struct { Hostname string `json:"hostname"` } -func (me *ClusterInfo) ToJson() string { - b, _ := json.Marshal(me) +func (ci *ClusterInfo) ToJson() string { + b, _ := json.Marshal(ci) return string(b) } func ClusterInfoFromJson(data io.Reader) *ClusterInfo { - var me *ClusterInfo - json.NewDecoder(data).Decode(&me) - return me + var ci *ClusterInfo + json.NewDecoder(data).Decode(&ci) + return ci } func ClusterInfosToJson(objmap []*ClusterInfo) string { @@ -38,7 +38,6 @@ func ClusterInfosFromJson(data io.Reader) []*ClusterInfo { var objmap []*ClusterInfo if err := decoder.Decode(&objmap); err != nil { return make([]*ClusterInfo, 0) - } else { - return objmap } + return objmap } diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go similarity index 51% rename from vendor/github.com/mattermost/mattermost-server/model/cluster_message.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go index 04d8673..529f4a9 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -19,14 +19,39 @@ const ( CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS = "inv_channel_members" CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_BY_NAME = "inv_channel_name" CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL = "inv_channel" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_GUEST_COUNT = "inv_channel_guest_count" CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER = "inv_user" CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER_TEAMS = "inv_user_teams" CLUSTER_EVENT_CLEAR_SESSION_CACHE_FOR_USER = "clear_session_user" CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES = "inv_roles" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLE_PERMISSIONS = "inv_role_permissions" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_PROFILE_BY_IDS = "inv_profile_ids" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_PROFILE_IN_CHANNEL = "inv_profile_in_channel" CLUSTER_EVENT_INVALIDATE_CACHE_FOR_SCHEMES = "inv_schemes" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_FILE_INFOS = "inv_file_infos" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOKS = "inv_webhooks" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_EMOJIS_BY_ID = "inv_emojis_by_id" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_EMOJIS_ID_BY_NAME = "inv_emojis_id_by_name" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_PINNEDPOSTS_COUNTS = "inv_channel_pinnedposts_counts" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBER_COUNTS = "inv_channel_member_counts" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_LAST_POSTS = "inv_last_posts" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_LAST_POST_TIME = "inv_last_post_time" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_TEAMS = "inv_teams" CLUSTER_EVENT_CLEAR_SESSION_CACHE_FOR_ALL_USERS = "inv_all_user_sessions" CLUSTER_EVENT_INSTALL_PLUGIN = "install_plugin" CLUSTER_EVENT_REMOVE_PLUGIN = "remove_plugin" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_TERMS_OF_SERVICE = "inv_terms_of_service" + CLUSTER_EVENT_BUSY_STATE_CHANGED = "busy_state_change" + + // Gossip communication + CLUSTER_GOSSIP_EVENT_REQUEST_GET_LOGS = "gossip_request_get_logs" + CLUSTER_GOSSIP_EVENT_RESPONSE_GET_LOGS = "gossip_response_get_logs" + CLUSTER_GOSSIP_EVENT_REQUEST_GET_CLUSTER_STATS = "gossip_request_cluster_stats" + CLUSTER_GOSSIP_EVENT_RESPONSE_GET_CLUSTER_STATS = "gossip_response_cluster_stats" + CLUSTER_GOSSIP_EVENT_REQUEST_GET_PLUGIN_STATUSES = "gossip_request_plugin_statuses" + CLUSTER_GOSSIP_EVENT_RESPONSE_GET_PLUGIN_STATUSES = "gossip_response_plugin_statuses" + CLUSTER_GOSSIP_EVENT_REQUEST_SAVE_CONFIG = "gossip_request_save_config" + CLUSTER_GOSSIP_EVENT_RESPONSE_SAVE_CONFIG = "gossip_response_save_config" // SendTypes for ClusterMessage. CLUSTER_SEND_BEST_EFFORT = "best_effort" diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_stats.go similarity index 73% rename from vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/cluster_stats.go index 064f7b8..9e8c630 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_stats.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -15,13 +15,13 @@ type ClusterStats struct { TotalMasterDbConnections int `json:"total_master_db_connections"` } -func (me *ClusterStats) ToJson() string { - b, _ := json.Marshal(me) +func (cs *ClusterStats) ToJson() string { + b, _ := json.Marshal(cs) return string(b) } func ClusterStatsFromJson(data io.Reader) *ClusterStats { - var me *ClusterStats - json.NewDecoder(data).Decode(&me) - return me + var cs *ClusterStats + json.NewDecoder(data).Decode(&cs) + return cs } diff --git a/vendor/github.com/mattermost/mattermost-server/model/command.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command.go similarity index 72% rename from vendor/github.com/mattermost/mattermost-server/model/command.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/command.go index b23e502..0013046 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/command.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -35,6 +35,12 @@ type Command struct { DisplayName string `json:"display_name"` Description string `json:"description"` URL string `json:"url"` + // PluginId records the id of the plugin that created this Command. If it is blank, the Command + // was not created by a plugin. + PluginId string `json:"plugin_id"` + AutocompleteData *AutocompleteData `db:"-" json:"autocomplete_data,omitempty"` + // AutocompleteIconData is a base64 encoded svg + AutocompleteIconData string `db:"-" json:"autocomplete_icon_data,omitempty"` } func (o *Command) ToJson() string { @@ -61,7 +67,7 @@ func CommandListFromJson(data io.Reader) []*Command { func (o *Command) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -77,11 +83,21 @@ func (o *Command) IsValid() *AppError { return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest) } - if len(o.CreatorId) != 26 { + // If the CreatorId is blank, this should be a command created by a plugin. + if o.CreatorId == "" && !IsValidPluginId(o.PluginId) { + return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "", http.StatusBadRequest) + } + + // If the PluginId is blank, this should be a command associated with a userId. 
+ if o.PluginId == "" && !IsValidId(o.CreatorId) { return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.TeamId) != 26 { + if o.CreatorId != "" && o.PluginId != "" { + return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "command cannot have both a CreatorId and a PluginId", http.StatusBadRequest) + } + + if !IsValidId(o.TeamId) { return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } @@ -109,6 +125,12 @@ func (o *Command) IsValid() *AppError { return NewAppError("Command.IsValid", "model.command.is_valid.description.app_error", nil, "", http.StatusBadRequest) } + if o.AutocompleteData != nil { + if err := o.AutocompleteData.IsValid(); err != nil { + return NewAppError("Command.IsValid", "model.command.is_valid.autocomplete_data.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go new file mode 100644 index 0000000..15a6372 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go @@ -0,0 +1,59 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + + goi18n "github.com/mattermost/go-i18n/i18n" +) + +type CommandArgs struct { + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + RootId string `json:"root_id"` + ParentId string `json:"parent_id"` + TriggerId string `json:"trigger_id,omitempty"` + Command string `json:"command"` + SiteURL string `json:"-"` + T goi18n.TranslateFunc `json:"-"` + UserMentions UserMentionMap `json:"-"` + ChannelMentions ChannelMentionMap `json:"-"` + + // DO NOT USE Session field is deprecated. MM-26398 + Session Session `json:"-"` +} + +func (o *CommandArgs) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func CommandArgsFromJson(data io.Reader) *CommandArgs { + var o *CommandArgs + json.NewDecoder(data).Decode(&o) + return o +} + +// AddUserMention adds or overrides an entry in UserMentions with name username +// and identifier userId +func (o *CommandArgs) AddUserMention(username, userId string) { + if o.UserMentions == nil { + o.UserMentions = make(UserMentionMap) + } + + o.UserMentions[username] = userId +} + +// AddChannelMention adds or overrides an entry in ChannelMentions with name +// channelName and identifier channelId +func (o *CommandArgs) AddChannelMention(channelName, channelId string) { + if o.ChannelMentions == nil { + o.ChannelMentions = make(ChannelMentionMap) + } + + o.ChannelMentions[channelName] = channelId +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go new file mode 100644 index 0000000..f115ed2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_autocomplete.go @@ -0,0 +1,455 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "net/url" + "path" + "reflect" + "strings" + + "github.com/pkg/errors" +) + +// AutocompleteArgType describes autocomplete argument type +type AutocompleteArgType string + +// Argument types +const ( + AutocompleteArgTypeText AutocompleteArgType = "TextInput" + AutocompleteArgTypeStaticList AutocompleteArgType = "StaticList" + AutocompleteArgTypeDynamicList AutocompleteArgType = "DynamicList" +) + +// AutocompleteData describes slash command autocomplete information. +type AutocompleteData struct { + // Trigger of the command + Trigger string + // Hint of a command + Hint string + // Text displayed to the user to help with the autocomplete description + HelpText string + // Role of the user who should be able to see the autocomplete info of this command + RoleID string + // Arguments of the command. Arguments can be named or positional. + // If they are positional order in the list matters, if they are named order does not matter. + // All arguments should be either named or positional, no mixing allowed. + Arguments []*AutocompleteArg + // Subcommands of the command + SubCommands []*AutocompleteData +} + +// AutocompleteArg describes an argument of the command. Arguments can be named or positional. +// If Name is empty string Argument is positional otherwise it is named argument. +// Named arguments are passed as --Name Argument_Value. +type AutocompleteArg struct { + // Name of the argument + Name string + // Text displayed to the user to help with the autocomplete + HelpText string + // Type of the argument + Type AutocompleteArgType + // Required determines if argument is optional or not. + Required bool + // Actual data of the argument (depends on the Type) + Data interface{} +} + +// AutocompleteTextArg describes text user can input as an argument. +type AutocompleteTextArg struct { + // Hint of the input text + Hint string + // Regex pattern to match + Pattern string +} + +// AutocompleteListItem describes an item in the AutocompleteStaticListArg. +type AutocompleteListItem struct { + Item string + Hint string + HelpText string +} + +// AutocompleteStaticListArg is used to input one of the arguments from the list, +// for example [yes, no], [on, off], and so on. +type AutocompleteStaticListArg struct { + PossibleArguments []AutocompleteListItem +} + +// AutocompleteDynamicListArg is used when user wants to download possible argument list from the URL. +type AutocompleteDynamicListArg struct { + FetchURL string +} + +// AutocompleteSuggestion describes a single suggestion item sent to the front-end +// Example: for user input `/jira cre` - +// Complete might be `/jira create` +// Suggestion might be `create`, +// Hint might be `[issue text]`, +// Description might be `Create a new Issue` +type AutocompleteSuggestion struct { + // Complete describes completed suggestion + Complete string + // Suggestion describes what user might want to input next + Suggestion string + // Hint describes a hint about the suggested input + Hint string + // Description of the command or a suggestion + Description string + // IconData is base64 encoded svg image + IconData string +} + +// NewAutocompleteData returns new Autocomplete data. 
+func NewAutocompleteData(trigger, hint, helpText string) *AutocompleteData { + return &AutocompleteData{ + Trigger: trigger, + Hint: hint, + HelpText: helpText, + RoleID: SYSTEM_USER_ROLE_ID, + Arguments: []*AutocompleteArg{}, + SubCommands: []*AutocompleteData{}, + } +} + +// AddCommand adds a subcommand to the autocomplete data. +func (ad *AutocompleteData) AddCommand(command *AutocompleteData) { + ad.SubCommands = append(ad.SubCommands, command) +} + +// AddTextArgument adds positional AutocompleteArgTypeText argument to the command. +func (ad *AutocompleteData) AddTextArgument(helpText, hint, pattern string) { + ad.AddNamedTextArgument("", helpText, hint, pattern, true) +} + +// AddNamedTextArgument adds named AutocompleteArgTypeText argument to the command. +func (ad *AutocompleteData) AddNamedTextArgument(name, helpText, hint, pattern string, required bool) { + argument := AutocompleteArg{ + Name: name, + HelpText: helpText, + Type: AutocompleteArgTypeText, + Required: required, + Data: &AutocompleteTextArg{Hint: hint, Pattern: pattern}, + } + ad.Arguments = append(ad.Arguments, &argument) +} + +// AddStaticListArgument adds positional AutocompleteArgTypeStaticList argument to the command. +func (ad *AutocompleteData) AddStaticListArgument(helpText string, required bool, items []AutocompleteListItem) { + ad.AddNamedStaticListArgument("", helpText, required, items) +} + +// AddNamedStaticListArgument adds named AutocompleteArgTypeStaticList argument to the command. +func (ad *AutocompleteData) AddNamedStaticListArgument(name, helpText string, required bool, items []AutocompleteListItem) { + argument := AutocompleteArg{ + Name: name, + HelpText: helpText, + Type: AutocompleteArgTypeStaticList, + Required: required, + Data: &AutocompleteStaticListArg{PossibleArguments: items}, + } + ad.Arguments = append(ad.Arguments, &argument) +} + +// AddDynamicListArgument adds positional AutocompleteArgTypeDynamicList argument to the command. +func (ad *AutocompleteData) AddDynamicListArgument(helpText, url string, required bool) { + ad.AddNamedDynamicListArgument("", helpText, url, required) +} + +// AddNamedDynamicListArgument adds named AutocompleteArgTypeDynamicList argument to the command. +func (ad *AutocompleteData) AddNamedDynamicListArgument(name, helpText, url string, required bool) { + argument := AutocompleteArg{ + Name: name, + HelpText: helpText, + Type: AutocompleteArgTypeDynamicList, + Required: required, + Data: &AutocompleteDynamicListArg{FetchURL: url}, + } + ad.Arguments = append(ad.Arguments, &argument) +} + +// Equals method checks if command is the same. 
+func (ad *AutocompleteData) Equals(command *AutocompleteData) bool { + if !(ad.Trigger == command.Trigger && ad.HelpText == command.HelpText && ad.RoleID == command.RoleID && ad.Hint == command.Hint) { + return false + } + if len(ad.Arguments) != len(command.Arguments) || len(ad.SubCommands) != len(command.SubCommands) { + return false + } + for i := range ad.Arguments { + if !ad.Arguments[i].Equals(command.Arguments[i]) { + return false + } + } + for i := range ad.SubCommands { + if !ad.SubCommands[i].Equals(command.SubCommands[i]) { + return false + } + } + return true +} + +// UpdateRelativeURLsForPluginCommands method updates relative urls for plugin commands +func (ad *AutocompleteData) UpdateRelativeURLsForPluginCommands(baseURL *url.URL) error { + for _, arg := range ad.Arguments { + if arg.Type != AutocompleteArgTypeDynamicList { + continue + } + dynamicList, ok := arg.Data.(*AutocompleteDynamicListArg) + if !ok { + return errors.New("Not a proper DynamicList type argument") + } + dynamicListURL, err := url.Parse(dynamicList.FetchURL) + if err != nil { + return errors.Wrapf(err, "FetchURL is not a proper url") + } + if !dynamicListURL.IsAbs() { + absURL := &url.URL{} + *absURL = *baseURL + absURL.Path = path.Join(absURL.Path, dynamicList.FetchURL) + dynamicList.FetchURL = absURL.String() + } + + } + for _, command := range ad.SubCommands { + err := command.UpdateRelativeURLsForPluginCommands(baseURL) + if err != nil { + return err + } + } + return nil +} + +// IsValid method checks if autocomplete data is valid. +func (ad *AutocompleteData) IsValid() error { + if ad == nil { + return errors.New("No nil commands are allowed in AutocompleteData") + } + if ad.Trigger == "" { + return errors.New("An empty command name in the autocomplete data") + } + if strings.ToLower(ad.Trigger) != ad.Trigger { + return errors.New("Command should be lowercase") + } + roles := []string{SYSTEM_ADMIN_ROLE_ID, SYSTEM_USER_ROLE_ID, ""} + if stringNotInSlice(ad.RoleID, roles) { + return errors.New("Wrong role in the autocomplete data") + } + if len(ad.Arguments) > 0 && len(ad.SubCommands) > 0 { + return errors.New("Command can't have arguments and subcommands") + } + if len(ad.Arguments) > 0 { + namedArgumentIndex := -1 + for i, arg := range ad.Arguments { + if arg.Name != "" { // it's a named argument + if namedArgumentIndex == -1 { // first named argument + namedArgumentIndex = i + } + } else { // it's a positional argument + if namedArgumentIndex != -1 { + return errors.New("Named argument should not be before positional argument") + } + } + if arg.Type == AutocompleteArgTypeDynamicList { + dynamicList, ok := arg.Data.(*AutocompleteDynamicListArg) + if !ok { + return errors.New("Not a proper DynamicList type argument") + } + _, err := url.Parse(dynamicList.FetchURL) + if err != nil { + return errors.Wrapf(err, "FetchURL is not a proper url") + } + } else if arg.Type == AutocompleteArgTypeStaticList { + staticList, ok := arg.Data.(*AutocompleteStaticListArg) + if !ok { + return errors.New("Not a proper StaticList type argument") + } + for _, arg := range staticList.PossibleArguments { + if arg.Item == "" { + return errors.New("Possible argument name not set in StaticList argument") + } + } + } else if arg.Type == AutocompleteArgTypeText { + if _, ok := arg.Data.(*AutocompleteTextArg); !ok { + return errors.New("Not a proper TextInput type argument") + } + if arg.Name == "" && !arg.Required { + return errors.New("Positional argument can not be optional") + } + } + } + } + for _, command := range 
ad.SubCommands { + err := command.IsValid() + if err != nil { + return err + } + } + return nil +} + +// ToJSON encodes AutocompleteData struct to the json +func (ad *AutocompleteData) ToJSON() ([]byte, error) { + b, err := json.Marshal(ad) + if err != nil { + return nil, errors.Wrapf(err, "can't marshal slash command %s", ad.Trigger) + } + return b, nil +} + +// AutocompleteDataFromJSON decodes AutocompleteData struct from the json +func AutocompleteDataFromJSON(data []byte) (*AutocompleteData, error) { + var ad AutocompleteData + if err := json.Unmarshal(data, &ad); err != nil { + return nil, errors.Wrap(err, "can't unmarshal AutocompleteData") + } + return &ad, nil +} + +// Equals method checks if argument is the same. +func (a *AutocompleteArg) Equals(arg *AutocompleteArg) bool { + if a.Name != arg.Name || + a.HelpText != arg.HelpText || + a.Type != arg.Type || + a.Required != arg.Required || + !reflect.DeepEqual(a.Data, arg.Data) { + return false + } + return true +} + +// UnmarshalJSON will unmarshal argument +func (a *AutocompleteArg) UnmarshalJSON(b []byte) error { + var arg map[string]interface{} + if err := json.Unmarshal(b, &arg); err != nil { + return errors.Wrapf(err, "Can't unmarshal argument %s", string(b)) + } + var ok bool + a.Name, ok = arg["Name"].(string) + if !ok { + return errors.Errorf("No field Name in the argument %s", string(b)) + } + + a.HelpText, ok = arg["HelpText"].(string) + if !ok { + return errors.Errorf("No field HelpText in the argument %s", string(b)) + } + + t, ok := arg["Type"].(string) + if !ok { + return errors.Errorf("No field Type in the argument %s", string(b)) + } + a.Type = AutocompleteArgType(t) + + a.Required, ok = arg["Required"].(bool) + if !ok { + return errors.Errorf("No field Required in the argument %s", string(b)) + } + + data, ok := arg["Data"] + if !ok { + return errors.Errorf("No field Data in the argument %s", string(b)) + } + + if a.Type == AutocompleteArgTypeText { + m, ok := data.(map[string]interface{}) + if !ok { + return errors.Errorf("Wrong Data type in the TextInput argument %s", string(b)) + } + pattern, ok := m["Pattern"].(string) + if !ok { + return errors.Errorf("No field Pattern in the TextInput argument %s", string(b)) + } + hint, ok := m["Hint"].(string) + if !ok { + return errors.Errorf("No field Hint in the TextInput argument %s", string(b)) + } + a.Data = &AutocompleteTextArg{Hint: hint, Pattern: pattern} + } else if a.Type == AutocompleteArgTypeStaticList { + m, ok := data.(map[string]interface{}) + if !ok { + return errors.Errorf("Wrong Data type in the StaticList argument %s", string(b)) + } + list, ok := m["PossibleArguments"].([]interface{}) + if !ok { + return errors.Errorf("No field PossibleArguments in the StaticList argument %s", string(b)) + } + + possibleArguments := []AutocompleteListItem{} + for i := range list { + args, ok := list[i].(map[string]interface{}) + if !ok { + return errors.Errorf("Wrong AutocompleteStaticListItem type in the StaticList argument %s", string(b)) + } + item, ok := args["Item"].(string) + if !ok { + return errors.Errorf("No field Item in the StaticList's possible arguments %s", string(b)) + } + + hint, ok := args["Hint"].(string) + if !ok { + return errors.Errorf("No field Hint in the StaticList's possible arguments %s", string(b)) + } + helpText, ok := args["HelpText"].(string) + if !ok { + return errors.Errorf("No field Hint in the StaticList's possible arguments %s", string(b)) + } + + possibleArguments = append(possibleArguments, AutocompleteListItem{ + Item: item, + Hint: 
hint, + HelpText: helpText, + }) + } + a.Data = &AutocompleteStaticListArg{PossibleArguments: possibleArguments} + } else if a.Type == AutocompleteArgTypeDynamicList { + m, ok := data.(map[string]interface{}) + if !ok { + return errors.Errorf("Wrong type in the DynamicList argument %s", string(b)) + } + url, ok := m["FetchURL"].(string) + if !ok { + return errors.Errorf("No field FetchURL in the DynamicList's argument %s", string(b)) + } + a.Data = &AutocompleteDynamicListArg{FetchURL: url} + } + return nil +} + +// AutocompleteSuggestionsToJSON returns json for a list of AutocompleteSuggestion objects +func AutocompleteSuggestionsToJSON(suggestions []AutocompleteSuggestion) []byte { + b, _ := json.Marshal(suggestions) + return b +} + +// AutocompleteSuggestionsFromJSON returns list of AutocompleteSuggestions from json. +func AutocompleteSuggestionsFromJSON(data io.Reader) []AutocompleteSuggestion { + var o []AutocompleteSuggestion + json.NewDecoder(data).Decode(&o) + return o +} + +// AutocompleteStaticListItemsToJSON returns json for a list of AutocompleteStaticListItem objects +func AutocompleteStaticListItemsToJSON(items []AutocompleteListItem) []byte { + b, _ := json.Marshal(items) + return b +} + +// AutocompleteStaticListItemsFromJSON returns list of AutocompleteStaticListItem from json. +func AutocompleteStaticListItemsFromJSON(data io.Reader) []AutocompleteListItem { + var o []AutocompleteListItem + json.NewDecoder(data).Decode(&o) + return o +} + +func stringNotInSlice(a string, slice []string) bool { + for _, b := range slice { + if b == a { + return false + } + } + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go new file mode 100644 index 0000000..9a4e40c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_request.go @@ -0,0 +1,31 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type CommandMoveRequest struct { + TeamId string `json:"team_id"` +} + +func CommandMoveRequestFromJson(data io.Reader) (*CommandMoveRequest, error) { + decoder := json.NewDecoder(data) + var cmr CommandMoveRequest + err := decoder.Decode(&cmr) + if err != nil { + return nil, err + } + return &cmr, nil +} + +func (cmr *CommandMoveRequest) ToJson() string { + b, err := json.Marshal(cmr) + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_response.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_response.go similarity index 57% rename from vendor/github.com/mattermost/mattermost-server/model/command_response.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/command_response.go index 6335cef..26b6cce 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/command_response.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_response.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -9,7 +9,7 @@ import ( "io/ioutil" "strings" - "github.com/mattermost/mattermost-server/utils/jsonutils" + "github.com/mattermost/mattermost-server/v5/utils/jsonutils" ) const ( @@ -18,17 +18,18 @@ const ( ) type CommandResponse struct { - ResponseType string `json:"response_type"` - Text string `json:"text"` - Username string `json:"username"` - ChannelId string `json:"channel_id"` - IconURL string `json:"icon_url"` - Type string `json:"type"` - Props StringInterface `json:"props"` - GotoLocation string `json:"goto_location"` - TriggerId string `json:"trigger_id"` - Attachments []*SlackAttachment `json:"attachments"` - ExtraResponses []*CommandResponse `json:"extra_responses"` + ResponseType string `json:"response_type"` + Text string `json:"text"` + Username string `json:"username"` + ChannelId string `json:"channel_id"` + IconURL string `json:"icon_url"` + Type string `json:"type"` + Props StringInterface `json:"props"` + GotoLocation string `json:"goto_location"` + TriggerId string `json:"trigger_id"` + SkipSlackParsing bool `json:"skip_slack_parsing"` // Set to `true` to skip the Slack-compatibility handling of Text. + Attachments []*SlackAttachment `json:"attachments"` + ExtraResponses []*CommandResponse `json:"extra_responses"` } func (o *CommandResponse) ToJson() string { diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_webhook.go similarity index 81% rename from vendor/github.com/mattermost/mattermost-server/model/command_webhook.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/command_webhook.go index 0b00e00..3757ecc 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_webhook.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -33,7 +33,7 @@ func (o *CommandWebhook) PreSave() { } func (o *CommandWebhook) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.id.app_error", nil, "", http.StatusBadRequest) } @@ -41,23 +41,23 @@ func (o *CommandWebhook) IsValid() *AppError { return NewAppError("CommandWebhook.IsValid", "model.command_hook.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.CommandId) != 26 { + if !IsValidId(o.CommandId) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.command_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.UserId) != 26 { + if !IsValidId(o.UserId) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ChannelId) != 26 { + if !IsValidId(o.ChannelId) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.channel_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.RootId) != 0 && len(o.RootId) != 26 { + if o.RootId != "" && !IsValidId(o.RootId) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.root_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ParentId) != 0 && len(o.ParentId) != 26 { + if o.ParentId != "" && !IsValidId(o.ParentId) { return NewAppError("CommandWebhook.IsValid", "model.command_hook.parent_id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/compliance.go b/vendor/github.com/mattermost/mattermost-server/v5/model/compliance.go similarity index 64% rename from vendor/github.com/mattermost/mattermost-server/model/compliance.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/compliance.go index 5546b78..2bb32de 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/compliance.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/compliance.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -37,61 +37,66 @@ type Compliance struct { type Compliances []Compliance -func (o *Compliance) ToJson() string { - b, _ := json.Marshal(o) +func (c *Compliance) ToJson() string { + b, _ := json.Marshal(c) return string(b) } -func (me *Compliance) PreSave() { - if me.Id == "" { - me.Id = NewId() +func (c *Compliance) PreSave() { + if c.Id == "" { + c.Id = NewId() } - if me.Status == "" { - me.Status = COMPLIANCE_STATUS_CREATED + if c.Status == "" { + c.Status = COMPLIANCE_STATUS_CREATED } - me.Count = 0 - me.Emails = NormalizeEmail(me.Emails) - me.Keywords = strings.ToLower(me.Keywords) + c.Count = 0 + c.Emails = NormalizeEmail(c.Emails) + c.Keywords = strings.ToLower(c.Keywords) - me.CreateAt = GetMillis() + c.CreateAt = GetMillis() } -func (me *Compliance) JobName() string { - jobName := me.Type - if me.Type == COMPLIANCE_TYPE_DAILY { - jobName += "-" + me.Desc +func (c *Compliance) DeepCopy() *Compliance { + copy := *c + return &copy +} + +func (c *Compliance) JobName() string { + jobName := c.Type + if c.Type == COMPLIANCE_TYPE_DAILY { + jobName += "-" + c.Desc } - jobName += "-" + me.Id + jobName += "-" + c.Id return jobName } -func (me *Compliance) IsValid() *AppError { +func (c *Compliance) IsValid() *AppError { - if len(me.Id) != 26 { + if !IsValidId(c.Id) { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "", http.StatusBadRequest) } - if me.CreateAt == 0 { + if c.CreateAt == 0 { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } - if len(me.Desc) > 512 || len(me.Desc) == 0 { + if len(c.Desc) > 512 || len(c.Desc) == 0 { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "", http.StatusBadRequest) } - if me.StartAt == 0 { + if c.StartAt == 0 { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_at.app_error", nil, "", http.StatusBadRequest) } - if me.EndAt == 0 { + if c.EndAt == 0 { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.end_at.app_error", nil, "", http.StatusBadRequest) } - if me.EndAt <= me.StartAt { + if c.EndAt <= c.StartAt { return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_end_at.app_error", nil, "", http.StatusBadRequest) } @@ -99,17 +104,17 @@ func (me *Compliance) IsValid() *AppError { } func ComplianceFromJson(data io.Reader) *Compliance { - var o *Compliance - json.NewDecoder(data).Decode(&o) - return o + var c *Compliance + json.NewDecoder(data).Decode(&c) + return c } -func (o Compliances) ToJson() string { - if b, err := json.Marshal(o); err != nil { +func (c Compliances) ToJson() string { + b, err := json.Marshal(c) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func CompliancesFromJson(data io.Reader) Compliances { diff --git a/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go b/vendor/github.com/mattermost/mattermost-server/v5/model/compliance_post.go similarity index 59% rename from vendor/github.com/mattermost/mattermost-server/model/compliance_post.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/compliance_post.go index ac3cf4e..37c1145 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/compliance_post.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information.
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -73,54 +73,52 @@ func CompliancePostHeader() []string { func cleanComplianceStrings(in string) string { if matched, _ := regexp.MatchString("^\\s*(=|\\+|\\-)", in); matched { return "'" + in - - } else { - return in } + return in } -func (me *CompliancePost) Row() []string { +func (cp *CompliancePost) Row() []string { postDeleteAt := "" - if me.PostDeleteAt > 0 { - postDeleteAt = time.Unix(0, me.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339) + if cp.PostDeleteAt > 0 { + postDeleteAt = time.Unix(0, cp.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339) } postUpdateAt := "" - if me.PostUpdateAt != me.PostCreateAt { - postUpdateAt = time.Unix(0, me.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339) + if cp.PostUpdateAt != cp.PostCreateAt { + postUpdateAt = time.Unix(0, cp.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339) } userType := "user" - if me.IsBot { + if cp.IsBot { userType = "bot" } return []string{ - cleanComplianceStrings(me.TeamName), - cleanComplianceStrings(me.TeamDisplayName), + cleanComplianceStrings(cp.TeamName), + cleanComplianceStrings(cp.TeamDisplayName), - cleanComplianceStrings(me.ChannelName), - cleanComplianceStrings(me.ChannelDisplayName), - cleanComplianceStrings(me.ChannelType), + cleanComplianceStrings(cp.ChannelName), + cleanComplianceStrings(cp.ChannelDisplayName), + cleanComplianceStrings(cp.ChannelType), - cleanComplianceStrings(me.UserUsername), - cleanComplianceStrings(me.UserEmail), - cleanComplianceStrings(me.UserNickname), + cleanComplianceStrings(cp.UserUsername), + cleanComplianceStrings(cp.UserEmail), + cleanComplianceStrings(cp.UserNickname), userType, - me.PostId, - time.Unix(0, me.PostCreateAt*int64(1000*1000)).Format(time.RFC3339), + cp.PostId, + time.Unix(0, cp.PostCreateAt*int64(1000*1000)).Format(time.RFC3339), postUpdateAt, postDeleteAt, - me.PostRootId, - me.PostParentId, - me.PostOriginalId, - cleanComplianceStrings(me.PostMessage), - me.PostType, - me.PostProps, - me.PostHashtags, - me.PostFileIds, + cp.PostRootId, + cp.PostParentId, + cp.PostOriginalId, + cleanComplianceStrings(cp.PostMessage), + cp.PostType, + cp.PostProps, + cp.PostHashtags, + cp.PostFileIds, } } diff --git a/vendor/github.com/mattermost/mattermost-server/model/config.go b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go similarity index 50% rename from vendor/github.com/mattermost/mattermost-server/model/config.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/config.go index 64de631..fd9b2f1 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/config.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -12,12 +12,15 @@ import ( "net/http" "net/url" "os" + "reflect" "regexp" "strconv" "strings" "time" "github.com/mattermost/ldap" + + "github.com/mattermost/mattermost-server/v5/mlog" ) const ( @@ -43,11 +46,14 @@ const ( SERVICE_GITLAB = "gitlab" SERVICE_GOOGLE = "google" SERVICE_OFFICE365 = "office365" + SERVICE_OPENID = "openid" GENERIC_NO_CHANNEL_NOTIFICATION = "generic_no_channel" GENERIC_NOTIFICATION = "generic" GENERIC_NOTIFICATION_SERVER = "https://push-test.mattermost.com" + MM_SUPPORT_ADVISOR_ADDRESS = "support-advisor@mattermost.com" FULL_NOTIFICATION = "full" + ID_LOADED_NOTIFICATION = "id_loaded" DIRECT_MESSAGE_ANY = "any" DIRECT_MESSAGE_TEAM = "team" @@ -79,6 +85,10 @@ const ( GROUP_UNREAD_CHANNELS_DEFAULT_ON = "default_on" GROUP_UNREAD_CHANNELS_DEFAULT_OFF = "default_off" + COLLAPSED_THREADS_DISABLED = "disabled" + COLLAPSED_THREADS_DEFAULT_ON = "default_on" + COLLAPSED_THREADS_DEFAULT_OFF = "default_off" + EMAIL_BATCHING_BUFFER_SIZE = 256 EMAIL_BATCHING_INTERVAL = 30 @@ -92,6 +102,7 @@ const ( SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE = "" SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT = 300 SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT = 300 + SERVICE_SETTINGS_DEFAULT_IDLE_TIMEOUT = 60 SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS = 10 SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM = "" SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS = ":8065" @@ -104,10 +115,13 @@ const ( TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT = "" TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT = 300 - SQL_SETTINGS_DEFAULT_DATA_SOURCE = "mmuser:mostest@tcp(dockerhost:3306)/mattermost_test?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s" + SQL_SETTINGS_DEFAULT_DATA_SOURCE = "postgres://mmuser:mostest@localhost/mattermost_test?sslmode=disable&connect_timeout=10" FILE_SETTINGS_DEFAULT_DIRECTORY = "./data/" + IMPORT_SETTINGS_DEFAULT_DIRECTORY = "./import" + IMPORT_SETTINGS_DEFAULT_RETENTION_DAYS = 30 + EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION = "" SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK = "https://about.mattermost.com/default-terms/" @@ -128,8 +142,11 @@ const ( LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME = "" LDAP_SETTINGS_DEFAULT_GROUP_DISPLAY_NAME_ATTRIBUTE = "" LDAP_SETTINGS_DEFAULT_GROUP_ID_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_PICTURE_ATTRIBUTE = "" SAML_SETTINGS_DEFAULT_ID_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_GUEST_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_ADMIN_ATTRIBUTE = "" SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = "" SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = "" SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = "" @@ -138,7 +155,16 @@ const ( SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE = "" SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = "" - NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK = "https://about.mattermost.com/downloads/" + SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA1 = "RSAwithSHA1" + SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA256 = "RSAwithSHA256" + SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA512 = "RSAwithSHA512" + SAML_SETTINGS_DEFAULT_SIGNATURE_ALGORITHM = SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA1 + + SAML_SETTINGS_CANONICAL_ALGORITHM_C14N = "Canonical1.0" + SAML_SETTINGS_CANONICAL_ALGORITHM_C14N11 = "Canonical1.1" + SAML_SETTINGS_DEFAULT_CANONICAL_ALGORITHM = SAML_SETTINGS_CANONICAL_ALGORITHM_C14N + + NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK = "https://mattermost.com/download/#mattermostApps" NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-android-app/" NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-ios-app/" @@ -146,12 
+172,14 @@ const ( ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS = 2500 - ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" - ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" + ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" + ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" + ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_JSON_URL = "https://notices.mattermost.com/" + ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_FETCH_FREQUENCY_SECONDS = 3600 TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default" - ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = "http://dockerhost:9200" + ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = "http://localhost:9200" ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME = "elastic" ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD = "changeme" ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS = 1 @@ -167,12 +195,18 @@ const ( ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600 ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS = 30 + BLEVE_SETTINGS_DEFAULT_INDEX_DIR = "" + BLEVE_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600 + DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS = 365 DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS = 365 DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME = "02:00" - PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" - PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" + PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" + PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" + PLUGIN_SETTINGS_DEFAULT_ENABLE_MARKETPLACE = true + PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL = "https://api.integrations.mattermost.com" + PLUGIN_SETTINGS_OLD_MARKETPLACE_URL = "https://marketplace.integrations.mattermost.com" COMPLIANCE_EXPORT_TYPE_CSV = "csv" COMPLIANCE_EXPORT_TYPE_ACTIANCE = "actiance" @@ -196,8 +230,17 @@ const ( OFFICE365_SETTINGS_DEFAULT_AUTH_ENDPOINT = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize" OFFICE365_SETTINGS_DEFAULT_TOKEN_ENDPOINT = "https://login.microsoftonline.com/common/oauth2/v2.0/token" OFFICE365_SETTINGS_DEFAULT_USER_API_ENDPOINT = "https://graph.microsoft.com/v1.0/me" + + CLOUD_SETTINGS_DEFAULT_CWS_URL = "https://customers.mattermost.com" + OPENID_SETTINGS_DEFAULT_SCOPE = "profile openid email" + + LOCAL_MODE_SOCKET_PATH = "/var/tmp/mattermost_local.socket" ) +func GetDefaultAppCustomURLSchemes() []string { + return []string{"mmauth://", "mmauthbeta://"} +} + var ServerTLSSupportedCiphers = map[string]uint16{ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, @@ -224,90 +267,105 @@ var ServerTLSSupportedCiphers = map[string]uint16{ } type ServiceSettings struct { - SiteURL *string `restricted:"true"` - WebsocketURL *string `restricted:"true"` - LicenseFileLocation *string `restricted:"true"` - ListenAddress *string `restricted:"true"` - ConnectionSecurity *string `restricted:"true"` - TLSCertFile *string `restricted:"true"` - TLSKeyFile *string `restricted:"true"` - TLSMinVer *string `restricted:"true"` - TLSStrictTransport *bool `restricted:"true"` - TLSStrictTransportMaxAge *int64 `restricted:"true"` - TLSOverwriteCiphers []string `restricted:"true"` - UseLetsEncrypt *bool `restricted:"true"` - LetsEncryptCertificateCacheFile *string `restricted:"true"` - Forward80To443 *bool `restricted:"true"` - TrustedProxyIPHeader []string `restricted:"true"` - ReadTimeout *int `restricted:"true"` - WriteTimeout *int `restricted:"true"` - MaximumLoginAttempts *int `restricted:"true"` - 
GoroutineHealthThreshold *int `restricted:"true"` - GoogleDeveloperKey *string `restricted:"true"` - EnableOAuthServiceProvider *bool - EnableIncomingWebhooks *bool - EnableOutgoingWebhooks *bool - EnableCommands *bool - DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used. - EnablePostUsernameOverride *bool - EnablePostIconOverride *bool - EnableLinkPreviews *bool - EnableTesting *bool `restricted:"true"` - EnableDeveloper *bool `restricted:"true"` - EnableSecurityFixAlert *bool `restricted:"true"` - EnableInsecureOutgoingConnections *bool `restricted:"true"` - AllowedUntrustedInternalConnections *string `restricted:"true"` - EnableMultifactorAuthentication *bool - EnforceMultifactorAuthentication *bool - EnableUserAccessTokens *bool - AllowCorsFrom *string `restricted:"true"` - CorsExposedHeaders *string `restricted:"true"` - CorsAllowCredentials *bool `restricted:"true"` - CorsDebug *bool `restricted:"true"` - AllowCookiesForSubdomains *bool `restricted:"true"` - SessionLengthWebInDays *int `restricted:"true"` - SessionLengthMobileInDays *int `restricted:"true"` - SessionLengthSSOInDays *int `restricted:"true"` - SessionCacheInMinutes *int `restricted:"true"` - SessionIdleTimeoutInMinutes *int `restricted:"true"` - WebsocketSecurePort *int `restricted:"true"` - WebsocketPort *int `restricted:"true"` - WebserverMode *string `restricted:"true"` - EnableCustomEmoji *bool - EnableEmojiPicker *bool - EnableGifPicker *bool - GfycatApiKey *string - GfycatApiSecret *string - DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used. - PostEditTimeLimit *int - TimeBetweenUserTypingUpdatesMilliseconds *int64 `restricted:"true"` - EnablePostSearch *bool `restricted:"true"` - MinimumHashtagLength *int `restricted:"true"` - EnableUserTypingMessages *bool `restricted:"true"` - EnableChannelViewedMessages *bool `restricted:"true"` - EnableUserStatuses *bool `restricted:"true"` - ExperimentalEnableAuthenticationTransfer *bool `restricted:"true"` - ClusterLogTimeoutMilliseconds *int `restricted:"true"` - CloseUnusedDirectMessages *bool - EnablePreviewFeatures *bool - EnableTutorial *bool - ExperimentalEnableDefaultChannelLeaveJoinMessages *bool - ExperimentalGroupUnreadChannels *string - ExperimentalChannelOrganization *bool - DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used. 
+ SiteURL *string `access:"environment,authentication,write_restrictable"` + WebsocketURL *string `access:"write_restrictable,cloud_restrictable"` + LicenseFileLocation *string `access:"write_restrictable,cloud_restrictable"` + ListenAddress *string `access:"environment,write_restrictable,cloud_restrictable"` + ConnectionSecurity *string `access:"environment,write_restrictable,cloud_restrictable"` + TLSCertFile *string `access:"environment,write_restrictable,cloud_restrictable"` + TLSKeyFile *string `access:"environment,write_restrictable,cloud_restrictable"` + TLSMinVer *string `access:"write_restrictable,cloud_restrictable"` + TLSStrictTransport *bool `access:"write_restrictable,cloud_restrictable"` + TLSStrictTransportMaxAge *int64 `access:"write_restrictable,cloud_restrictable"` + TLSOverwriteCiphers []string `access:"write_restrictable,cloud_restrictable"` + UseLetsEncrypt *bool `access:"environment,write_restrictable,cloud_restrictable"` + LetsEncryptCertificateCacheFile *string `access:"environment,write_restrictable,cloud_restrictable"` + Forward80To443 *bool `access:"environment,write_restrictable,cloud_restrictable"` + TrustedProxyIPHeader []string `access:"write_restrictable,cloud_restrictable"` + ReadTimeout *int `access:"environment,write_restrictable,cloud_restrictable"` + WriteTimeout *int `access:"environment,write_restrictable,cloud_restrictable"` + IdleTimeout *int `access:"write_restrictable,cloud_restrictable"` + MaximumLoginAttempts *int `access:"authentication,write_restrictable,cloud_restrictable"` + GoroutineHealthThreshold *int `access:"write_restrictable,cloud_restrictable"` + GoogleDeveloperKey *string `access:"site,write_restrictable,cloud_restrictable"` + EnableOAuthServiceProvider *bool `access:"integrations"` + EnableIncomingWebhooks *bool `access:"integrations"` + EnableOutgoingWebhooks *bool `access:"integrations"` + EnableCommands *bool `access:"integrations"` + DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used. 
+ EnablePostUsernameOverride *bool `access:"integrations"` + EnablePostIconOverride *bool `access:"integrations"` + EnableLinkPreviews *bool `access:"site"` + EnableTesting *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableDeveloper *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableOpenTracing *bool `access:"write_restrictable,cloud_restrictable"` + EnableSecurityFixAlert *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableInsecureOutgoingConnections *bool `access:"environment,write_restrictable,cloud_restrictable"` + AllowedUntrustedInternalConnections *string `access:"environment,write_restrictable,cloud_restrictable"` + EnableMultifactorAuthentication *bool `access:"authentication"` + EnforceMultifactorAuthentication *bool `access:"authentication"` + EnableUserAccessTokens *bool `access:"integrations"` + AllowCorsFrom *string `access:"integrations,write_restrictable,cloud_restrictable"` + CorsExposedHeaders *string `access:"integrations,write_restrictable,cloud_restrictable"` + CorsAllowCredentials *bool `access:"integrations,write_restrictable,cloud_restrictable"` + CorsDebug *bool `access:"integrations,write_restrictable,cloud_restrictable"` + AllowCookiesForSubdomains *bool `access:"write_restrictable,cloud_restrictable"` + ExtendSessionLengthWithActivity *bool `access:"environment,write_restrictable,cloud_restrictable"` + SessionLengthWebInDays *int `access:"environment,write_restrictable,cloud_restrictable"` + SessionLengthMobileInDays *int `access:"environment,write_restrictable,cloud_restrictable"` + SessionLengthSSOInDays *int `access:"environment,write_restrictable,cloud_restrictable"` + SessionCacheInMinutes *int `access:"environment,write_restrictable,cloud_restrictable"` + SessionIdleTimeoutInMinutes *int `access:"environment,write_restrictable,cloud_restrictable"` + WebsocketSecurePort *int `access:"write_restrictable,cloud_restrictable"` + WebsocketPort *int `access:"write_restrictable,cloud_restrictable"` + WebserverMode *string `access:"environment,write_restrictable,cloud_restrictable"` + EnableCustomEmoji *bool `access:"site"` + EnableEmojiPicker *bool `access:"site"` + EnableGifPicker *bool `access:"integrations"` + GfycatApiKey *string `access:"integrations"` + GfycatApiSecret *string `access:"integrations"` + DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used. 
+ PostEditTimeLimit *int `access:"user_management_permissions"` + TimeBetweenUserTypingUpdatesMilliseconds *int64 `access:"experimental,write_restrictable,cloud_restrictable"` + EnablePostSearch *bool `access:"write_restrictable,cloud_restrictable"` + MinimumHashtagLength *int `access:"environment,write_restrictable,cloud_restrictable"` + EnableUserTypingMessages *bool `access:"experimental,write_restrictable,cloud_restrictable"` + EnableChannelViewedMessages *bool `access:"experimental,write_restrictable,cloud_restrictable"` + EnableUserStatuses *bool `access:"write_restrictable,cloud_restrictable"` + ExperimentalEnableAuthenticationTransfer *bool `access:"experimental,write_restrictable,cloud_restrictable"` + ClusterLogTimeoutMilliseconds *int `access:"write_restrictable,cloud_restrictable"` + CloseUnusedDirectMessages *bool `access:"experimental"` + EnablePreviewFeatures *bool `access:"experimental"` + EnableTutorial *bool `access:"experimental"` + ExperimentalEnableDefaultChannelLeaveJoinMessages *bool `access:"experimental"` + ExperimentalGroupUnreadChannels *string `access:"experimental"` + ExperimentalChannelOrganization *bool `access:"experimental"` + DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used. EnableAPITeamDeletion *bool - ExperimentalEnableHardenedMode *bool - DisableLegacyMFA *bool `restricted:"true"` - ExperimentalStrictCSRFEnforcement *bool `restricted:"true"` - EnableEmailInvitations *bool - ExperimentalLdapGroupSync *bool - DisableBotsWhenOwnerIsDeactivated *bool `restricted:"true"` - EnableBotAccountCreation *bool - EnableSVGs *bool + EnableAPIUserDeletion *bool + ExperimentalEnableHardenedMode *bool `access:"experimental"` + DisableLegacyMFA *bool `access:"write_restrictable,cloud_restrictable"` + ExperimentalStrictCSRFEnforcement *bool `access:"experimental,write_restrictable,cloud_restrictable"` + EnableEmailInvitations *bool `access:"authentication"` + DisableBotsWhenOwnerIsDeactivated *bool `access:"integrations,write_restrictable,cloud_restrictable"` + EnableBotAccountCreation *bool `access:"integrations"` + EnableSVGs *bool `access:"site"` + EnableLatex *bool `access:"site"` + EnableAPIChannelDeletion *bool + EnableLocalMode *bool + LocalModeSocketLocation *string + EnableAWSMetering *bool + SplitKey *string `access:"environment,write_restrictable"` + FeatureFlagSyncIntervalSeconds *int `access:"environment,write_restrictable"` + DebugSplit *bool `access:"environment,write_restrictable"` + ThreadAutoFollow *bool `access:"experimental"` + CollapsedThreads *string `access:"experimental"` + ManagedResourcePaths *string `access:"environment,write_restrictable,cloud_restrictable"` + EnableLegacySidebar *bool `access:"experimental"` } func (s *ServiceSettings) SetDefaults(isUpdate bool) { @@ -341,7 +399,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } if s.EnableLinkPreviews == nil { - s.EnableLinkPreviews = NewBool(false) + s.EnableLinkPreviews = NewBool(true) } if s.EnableTesting == nil { @@ -352,6 +410,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.EnableDeveloper = NewBool(false) } + if s.EnableOpenTracing == nil { + 
s.EnableOpenTracing = NewBool(false) + } + if s.EnableSecurityFixAlert == nil { s.EnableSecurityFixAlert = NewBool(true) } @@ -392,10 +454,6 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.EnableIncomingWebhooks = NewBool(true) } - if s.EnableIncomingWebhooks == nil { - s.EnableIncomingWebhooks = NewBool(true) - } - if s.EnableOutgoingWebhooks == nil { s.EnableOutgoingWebhooks = NewBool(true) } @@ -444,6 +502,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.WriteTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT) } + if s.IdleTimeout == nil { + s.IdleTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_IDLE_TIMEOUT) + } + if s.MaximumLoginAttempts == nil { s.MaximumLoginAttempts = NewInt(SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS) } @@ -498,12 +560,25 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.EnableTutorial = NewBool(true) } + // Must be manually enabled for existing installations. + if s.ExtendSessionLengthWithActivity == nil { + s.ExtendSessionLengthWithActivity = NewBool(!isUpdate) + } + if s.SessionLengthWebInDays == nil { - s.SessionLengthWebInDays = NewInt(180) + if isUpdate { + s.SessionLengthWebInDays = NewInt(180) + } else { + s.SessionLengthWebInDays = NewInt(30) + } } if s.SessionLengthMobileInDays == nil { - s.SessionLengthMobileInDays = NewInt(180) + if isUpdate { + s.SessionLengthMobileInDays = NewInt(180) + } else { + s.SessionLengthMobileInDays = NewInt(30) + } } if s.SessionLengthSSOInDays == nil { @@ -569,7 +644,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } if s.EnableCustomEmoji == nil { - s.EnableCustomEmoji = NewBool(false) + s.EnableCustomEmoji = NewBool(true) } if s.EnableEmojiPicker == nil { @@ -577,7 +652,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } if s.EnableGifPicker == nil { - s.EnableGifPicker = NewBool(false) + s.EnableGifPicker = NewBool(true) } if s.GfycatApiKey == nil || *s.GfycatApiKey == "" { @@ -645,6 +720,14 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.EnableAPITeamDeletion = NewBool(false) } + if s.EnableAPIUserDeletion == nil { + s.EnableAPIUserDeletion = NewBool(false) + } + + if s.EnableAPIChannelDeletion == nil { + s.EnableAPIChannelDeletion = NewBool(false) + } + if s.ExperimentalEnableHardenedMode == nil { s.ExperimentalEnableHardenedMode = NewBool(false) } @@ -653,10 +736,6 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.DisableLegacyMFA = NewBool(!isUpdate) } - if s.ExperimentalLdapGroupSync == nil { - s.ExperimentalLdapGroupSync = NewBool(false) - } - if s.ExperimentalStrictCSRFEnforcement == nil { s.ExperimentalStrictCSRFEnforcement = NewBool(false) } @@ -676,23 +755,72 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { s.EnableSVGs = NewBool(false) } } + + if s.EnableLatex == nil { + if isUpdate { + s.EnableLatex = NewBool(true) + } else { + s.EnableLatex = NewBool(false) + } + } + + if s.EnableLocalMode == nil { + s.EnableLocalMode = NewBool(false) + } + + if s.LocalModeSocketLocation == nil { + s.LocalModeSocketLocation = NewString(LOCAL_MODE_SOCKET_PATH) + } + + if s.EnableAWSMetering == nil { + s.EnableAWSMetering = NewBool(false) + } + + if s.SplitKey == nil { + s.SplitKey = NewString("") + } + + if s.FeatureFlagSyncIntervalSeconds == nil { + s.FeatureFlagSyncIntervalSeconds = NewInt(30) + } + + if s.DebugSplit == nil { + s.DebugSplit = NewBool(false) + } + + if s.ThreadAutoFollow == nil { + s.ThreadAutoFollow = NewBool(true) + } + + if s.CollapsedThreads == nil { + s.CollapsedThreads = NewString(COLLAPSED_THREADS_DISABLED) + } + + 
if s.ManagedResourcePaths == nil { + s.ManagedResourcePaths = NewString("") + } + + if s.EnableLegacySidebar == nil { + s.EnableLegacySidebar = NewBool(false) + } } type ClusterSettings struct { - Enable *bool `restricted:"true"` - ClusterName *string `restricted:"true"` - OverrideHostname *string `restricted:"true"` - NetworkInterface *string `restricted:"true"` - BindAddress *string `restricted:"true"` - AdvertiseAddress *string `restricted:"true"` - UseIpAddress *bool `restricted:"true"` - UseExperimentalGossip *bool `restricted:"true"` - ReadOnlyConfig *bool `restricted:"true"` - GossipPort *int `restricted:"true"` - StreamingPort *int `restricted:"true"` - MaxIdleConns *int `restricted:"true"` - MaxIdleConnsPerHost *int `restricted:"true"` - IdleConnTimeoutMilliseconds *int `restricted:"true"` + Enable *bool `access:"environment,write_restrictable"` + ClusterName *string `access:"environment,write_restrictable,cloud_restrictable"` + OverrideHostname *string `access:"environment,write_restrictable,cloud_restrictable"` + NetworkInterface *string `access:"environment,write_restrictable,cloud_restrictable"` + BindAddress *string `access:"environment,write_restrictable,cloud_restrictable"` + AdvertiseAddress *string `access:"environment,write_restrictable,cloud_restrictable"` + UseIpAddress *bool `access:"environment,write_restrictable,cloud_restrictable"` + UseExperimentalGossip *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableExperimentalGossipEncryption *bool `access:"environment,write_restrictable,cloud_restrictable"` + ReadOnlyConfig *bool `access:"environment,write_restrictable,cloud_restrictable"` + GossipPort *int `access:"environment,write_restrictable,cloud_restrictable"` + StreamingPort *int `access:"environment,write_restrictable,cloud_restrictable"` + MaxIdleConns *int `access:"environment,write_restrictable,cloud_restrictable"` + MaxIdleConnsPerHost *int `access:"environment,write_restrictable,cloud_restrictable"` + IdleConnTimeoutMilliseconds *int `access:"environment,write_restrictable,cloud_restrictable"` } func (s *ClusterSettings) SetDefaults() { @@ -725,7 +853,11 @@ func (s *ClusterSettings) SetDefaults() { } if s.UseExperimentalGossip == nil { - s.UseExperimentalGossip = NewBool(false) + s.UseExperimentalGossip = NewBool(true) + } + + if s.EnableExperimentalGossipEncryption == nil { + s.EnableExperimentalGossipEncryption = NewBool(false) } if s.ReadOnlyConfig == nil { @@ -754,9 +886,9 @@ func (s *ClusterSettings) SetDefaults() { } type MetricsSettings struct { - Enable *bool `restricted:"true"` - BlockProfileRate *int `restricted:"true"` - ListenAddress *string `restricted:"true"` + Enable *bool `access:"environment,write_restrictable,cloud_restrictable"` + BlockProfileRate *int `access:"environment,write_restrictable,cloud_restrictable"` + ListenAddress *string `access:"environment,write_restrictable,cloud_restrictable"` } func (s *MetricsSettings) SetDefaults() { @@ -774,11 +906,15 @@ func (s *MetricsSettings) SetDefaults() { } type ExperimentalSettings struct { - ClientSideCertEnable *bool - ClientSideCertCheck *string - EnableClickToReply *bool `restricted:"true"` - LinkMetadataTimeoutMilliseconds *int64 `restricted:"true"` - RestrictSystemAdmin *bool `restricted:"true"` + ClientSideCertEnable *bool `access:"experimental,cloud_restrictable"` + ClientSideCertCheck *string `access:"experimental,cloud_restrictable"` + EnableClickToReply *bool `access:"experimental,write_restrictable,cloud_restrictable"` + LinkMetadataTimeoutMilliseconds 
*int64 `access:"experimental,write_restrictable,cloud_restrictable"` + RestrictSystemAdmin *bool `access:"experimental,write_restrictable"` + UseNewSAMLLibrary *bool `access:"experimental,cloud_restrictable"` + CloudUserLimit *int64 `access:"experimental,write_restrictable"` + CloudBilling *bool `access:"experimental,write_restrictable"` + EnableSharedChannels *bool `access:"experimental"` } func (s *ExperimentalSettings) SetDefaults() { @@ -801,10 +937,27 @@ func (s *ExperimentalSettings) SetDefaults() { if s.RestrictSystemAdmin == nil { s.RestrictSystemAdmin = NewBool(false) } + + if s.CloudUserLimit == nil { + // User limit 0 is treated as no limit + s.CloudUserLimit = NewInt64(0) + } + + if s.CloudBilling == nil { + s.CloudBilling = NewBool(false) + } + + if s.UseNewSAMLLibrary == nil { + s.UseNewSAMLLibrary = NewBool(false) + } + + if s.EnableSharedChannels == nil { + s.EnableSharedChannels = NewBool(false) + } } type AnalyticsSettings struct { - MaxUsersForStatistics *int `restricted:"true"` + MaxUsersForStatistics *int `access:"write_restrictable,cloud_restrictable"` } func (s *AnalyticsSettings) SetDefaults() { @@ -814,16 +967,19 @@ func (s *AnalyticsSettings) SetDefaults() { } type SSOSettings struct { - Enable *bool - Secret *string - Id *string - Scope *string - AuthEndpoint *string - TokenEndpoint *string - UserApiEndpoint *string -} - -func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEndpoint string) { + Enable *bool `access:"authentication"` + Secret *string `access:"authentication"` + Id *string `access:"authentication"` + Scope *string `access:"authentication"` + AuthEndpoint *string `access:"authentication"` + TokenEndpoint *string `access:"authentication"` + UserApiEndpoint *string `access:"authentication"` + DiscoveryEndpoint *string `access:"authentication"` + ButtonText *string `access:"authentication"` + ButtonColor *string `access:"authentication"` +} + +func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEndpoint, buttonColor string) { if s.Enable == nil { s.Enable = NewBool(false) } @@ -840,6 +996,10 @@ func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEnd s.Scope = NewString(scope) } + if s.DiscoveryEndpoint == nil { + s.DiscoveryEndpoint = NewString("") + } + if s.AuthEndpoint == nil { s.AuthEndpoint = NewString(authEndpoint) } @@ -851,24 +1011,96 @@ func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEnd if s.UserApiEndpoint == nil { s.UserApiEndpoint = NewString(userApiEndpoint) } + + if s.ButtonText == nil { + s.ButtonText = NewString("") + } + + if s.ButtonColor == nil { + s.ButtonColor = NewString(buttonColor) + } +} + +type Office365Settings struct { + Enable *bool `access:"authentication"` + Secret *string `access:"authentication"` + Id *string `access:"authentication"` + Scope *string `access:"authentication"` + AuthEndpoint *string `access:"authentication"` + TokenEndpoint *string `access:"authentication"` + UserApiEndpoint *string `access:"authentication"` + DiscoveryEndpoint *string `access:"authentication"` + DirectoryId *string `access:"authentication"` +} + +func (s *Office365Settings) setDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.Id == nil { + s.Id = NewString("") + } + + if s.Secret == nil { + s.Secret = NewString("") + } + + if s.Scope == nil { + s.Scope = NewString(OFFICE365_SETTINGS_DEFAULT_SCOPE) + } + + if s.DiscoveryEndpoint == nil { + s.DiscoveryEndpoint = NewString("") + } + + if s.AuthEndpoint == nil { 
+ s.AuthEndpoint = NewString(OFFICE365_SETTINGS_DEFAULT_AUTH_ENDPOINT) + } + + if s.TokenEndpoint == nil { + s.TokenEndpoint = NewString(OFFICE365_SETTINGS_DEFAULT_TOKEN_ENDPOINT) + } + + if s.UserApiEndpoint == nil { + s.UserApiEndpoint = NewString(OFFICE365_SETTINGS_DEFAULT_USER_API_ENDPOINT) + } + + if s.DirectoryId == nil { + s.DirectoryId = NewString("") + } +} + +func (s *Office365Settings) SSOSettings() *SSOSettings { + ssoSettings := SSOSettings{} + ssoSettings.Enable = s.Enable + ssoSettings.Secret = s.Secret + ssoSettings.Id = s.Id + ssoSettings.Scope = s.Scope + ssoSettings.DiscoveryEndpoint = s.DiscoveryEndpoint + ssoSettings.AuthEndpoint = s.AuthEndpoint + ssoSettings.TokenEndpoint = s.TokenEndpoint + ssoSettings.UserApiEndpoint = s.UserApiEndpoint + return &ssoSettings } type SqlSettings struct { - DriverName *string `restricted:"true"` - DataSource *string `restricted:"true"` - DataSourceReplicas []string `restricted:"true"` - DataSourceSearchReplicas []string `restricted:"true"` - MaxIdleConns *int `restricted:"true"` - ConnMaxLifetimeMilliseconds *int `restricted:"true"` - MaxOpenConns *int `restricted:"true"` - Trace *bool `restricted:"true"` - AtRestEncryptKey *string `restricted:"true"` - QueryTimeout *int `restricted:"true"` + DriverName *string `access:"environment,write_restrictable,cloud_restrictable"` + DataSource *string `access:"environment,write_restrictable,cloud_restrictable"` + DataSourceReplicas []string `access:"environment,write_restrictable,cloud_restrictable"` + DataSourceSearchReplicas []string `access:"environment,write_restrictable,cloud_restrictable"` + MaxIdleConns *int `access:"environment,write_restrictable,cloud_restrictable"` + ConnMaxLifetimeMilliseconds *int `access:"environment,write_restrictable,cloud_restrictable"` + MaxOpenConns *int `access:"environment,write_restrictable,cloud_restrictable"` + Trace *bool `access:"environment,write_restrictable,cloud_restrictable"` + AtRestEncryptKey *string `access:"environment,write_restrictable,cloud_restrictable"` + QueryTimeout *int `access:"environment,write_restrictable,cloud_restrictable"` + DisableDatabaseSearch *bool `access:"environment,write_restrictable,cloud_restrictable"` } func (s *SqlSettings) SetDefaults(isUpdate bool) { if s.DriverName == nil { - s.DriverName = NewString(DATABASE_DRIVER_MYSQL) + s.DriverName = NewString(DATABASE_DRIVER_POSTGRES) } if s.DataSource == nil { @@ -912,18 +1144,24 @@ func (s *SqlSettings) SetDefaults(isUpdate bool) { if s.QueryTimeout == nil { s.QueryTimeout = NewInt(30) } + + if s.DisableDatabaseSearch == nil { + s.DisableDatabaseSearch = NewBool(false) + } } type LogSettings struct { - EnableConsole *bool `restricted:"true"` - ConsoleLevel *string `restricted:"true"` - ConsoleJson *bool `restricted:"true"` - EnableFile *bool `restricted:"true"` - FileLevel *string `restricted:"true"` - FileJson *bool `restricted:"true"` - FileLocation *string `restricted:"true"` - EnableWebhookDebugging *bool `restricted:"true"` - EnableDiagnostics *bool `restricted:"true"` + EnableConsole *bool `access:"environment,write_restrictable,cloud_restrictable"` + ConsoleLevel *string `access:"environment,write_restrictable,cloud_restrictable"` + ConsoleJson *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableFile *bool `access:"environment,write_restrictable,cloud_restrictable"` + FileLevel *string `access:"environment,write_restrictable,cloud_restrictable"` + FileJson *bool `access:"environment,write_restrictable,cloud_restrictable"` + FileLocation 
*string `access:"environment,write_restrictable,cloud_restrictable"` + EnableWebhookDebugging *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableDiagnostics *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableSentry *bool `access:"environment,write_restrictable,cloud_restrictable"` + AdvancedLoggingConfig *string `access:"environment,write_restrictable,cloud_restrictable"` } func (s *LogSettings) SetDefaults() { @@ -955,6 +1193,10 @@ func (s *LogSettings) SetDefaults() { s.EnableDiagnostics = NewBool(true) } + if s.EnableSentry == nil { + s.EnableSentry = NewBool(*s.EnableDiagnostics) + } + if s.ConsoleJson == nil { s.ConsoleJson = NewBool(true) } @@ -962,16 +1204,66 @@ func (s *LogSettings) SetDefaults() { if s.FileJson == nil { s.FileJson = NewBool(true) } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } +} + +type ExperimentalAuditSettings struct { + FileEnabled *bool `access:"experimental,write_restrictable,cloud_restrictable"` + FileName *string `access:"experimental,write_restrictable,cloud_restrictable"` + FileMaxSizeMB *int `access:"experimental,write_restrictable,cloud_restrictable"` + FileMaxAgeDays *int `access:"experimental,write_restrictable,cloud_restrictable"` + FileMaxBackups *int `access:"experimental,write_restrictable,cloud_restrictable"` + FileCompress *bool `access:"experimental,write_restrictable,cloud_restrictable"` + FileMaxQueueSize *int `access:"experimental,write_restrictable,cloud_restrictable"` + AdvancedLoggingConfig *string `access:"experimental,write_restrictable,cloud_restrictable"` +} + +func (s *ExperimentalAuditSettings) SetDefaults() { + if s.FileEnabled == nil { + s.FileEnabled = NewBool(false) + } + + if s.FileName == nil { + s.FileName = NewString("") + } + + if s.FileMaxSizeMB == nil { + s.FileMaxSizeMB = NewInt(100) + } + + if s.FileMaxAgeDays == nil { + s.FileMaxAgeDays = NewInt(0) // no limit on age + } + + if s.FileMaxBackups == nil { // no limit on number of backups + s.FileMaxBackups = NewInt(0) + } + + if s.FileCompress == nil { + s.FileCompress = NewBool(false) + } + + if s.FileMaxQueueSize == nil { + s.FileMaxQueueSize = NewInt(1000) + } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } } type NotificationLogSettings struct { - EnableConsole *bool `restricted:"true"` - ConsoleLevel *string `restricted:"true"` - ConsoleJson *bool `restricted:"true"` - EnableFile *bool `restricted:"true"` - FileLevel *string `restricted:"true"` - FileJson *bool `restricted:"true"` - FileLocation *string `restricted:"true"` + EnableConsole *bool `access:"write_restrictable,cloud_restrictable"` + ConsoleLevel *string `access:"write_restrictable,cloud_restrictable"` + ConsoleJson *bool `access:"write_restrictable,cloud_restrictable"` + EnableFile *bool `access:"write_restrictable,cloud_restrictable"` + FileLevel *string `access:"write_restrictable,cloud_restrictable"` + FileJson *bool `access:"write_restrictable,cloud_restrictable"` + FileLocation *string `access:"write_restrictable,cloud_restrictable"` + AdvancedLoggingConfig *string `access:"write_restrictable,cloud_restrictable"` } func (s *NotificationLogSettings) SetDefaults() { @@ -1002,14 +1294,18 @@ func (s *NotificationLogSettings) SetDefaults() { if s.FileJson == nil { s.FileJson = NewBool(true) } + + if s.AdvancedLoggingConfig == nil { + s.AdvancedLoggingConfig = NewString("") + } } type PasswordSettings struct { - MinimumLength *int - Lowercase *bool - Number *bool - Uppercase *bool 
- Symbol *bool + MinimumLength *int `access:"authentication"` + Lowercase *bool `access:"authentication"` + Number *bool `access:"authentication"` + Uppercase *bool `access:"authentication"` + Symbol *bool `access:"authentication"` } func (s *PasswordSettings) SetDefaults() { @@ -1035,24 +1331,25 @@ func (s *PasswordSettings) SetDefaults() { } type FileSettings struct { - EnableFileAttachments *bool - EnableMobileUpload *bool - EnableMobileDownload *bool - MaxFileSize *int64 - DriverName *string `restricted:"true"` - Directory *string `restricted:"true"` - EnablePublicLink *bool - PublicLinkSalt *string - InitialFont *string - AmazonS3AccessKeyId *string `restricted:"true"` - AmazonS3SecretAccessKey *string `restricted:"true"` - AmazonS3Bucket *string `restricted:"true"` - AmazonS3Region *string `restricted:"true"` - AmazonS3Endpoint *string `restricted:"true"` - AmazonS3SSL *bool `restricted:"true"` - AmazonS3SignV2 *bool `restricted:"true"` - AmazonS3SSE *bool `restricted:"true"` - AmazonS3Trace *bool `restricted:"true"` + EnableFileAttachments *bool `access:"site,cloud_restrictable"` + EnableMobileUpload *bool `access:"site,cloud_restrictable"` + EnableMobileDownload *bool `access:"site,cloud_restrictable"` + MaxFileSize *int64 `access:"environment,cloud_restrictable"` + DriverName *string `access:"environment,write_restrictable,cloud_restrictable"` + Directory *string `access:"environment,write_restrictable,cloud_restrictable"` + EnablePublicLink *bool `access:"site,cloud_restrictable"` + PublicLinkSalt *string `access:"site,cloud_restrictable"` + InitialFont *string `access:"environment,cloud_restrictable"` + AmazonS3AccessKeyId *string `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3SecretAccessKey *string `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3Bucket *string `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3PathPrefix *string `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3Region *string `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3Endpoint *string `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3SSL *bool `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3SignV2 *bool `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3SSE *bool `access:"environment,write_restrictable,cloud_restrictable"` + AmazonS3Trace *bool `access:"environment,write_restrictable,cloud_restrictable"` } func (s *FileSettings) SetDefaults(isUpdate bool) { @@ -1076,7 +1373,7 @@ func (s *FileSettings) SetDefaults(isUpdate bool) { s.DriverName = NewString(IMAGE_DRIVER_LOCAL) } - if s.Directory == nil { + if s.Directory == nil || *s.Directory == "" { s.Directory = NewString(FILE_SETTINGS_DEFAULT_DIRECTORY) } @@ -1111,6 +1408,10 @@ func (s *FileSettings) SetDefaults(isUpdate bool) { s.AmazonS3Bucket = NewString("") } + if s.AmazonS3PathPrefix == nil { + s.AmazonS3PathPrefix = NewString("") + } + if s.AmazonS3Region == nil { s.AmazonS3Region = NewString("") } @@ -1139,34 +1440,36 @@ func (s *FileSettings) SetDefaults(isUpdate bool) { } type EmailSettings struct { - EnableSignUpWithEmail *bool - EnableSignInWithEmail *bool - EnableSignInWithUsername *bool - SendEmailNotifications *bool - UseChannelInEmailNotifications *bool - RequireEmailVerification *bool - FeedbackName *string - FeedbackEmail *string - ReplyToAddress *string - FeedbackOrganization *string - EnableSMTPAuth *bool `restricted:"true"` - SMTPUsername *string 
`restricted:"true"` - SMTPPassword *string `restricted:"true"` - SMTPServer *string `restricted:"true"` - SMTPPort *string `restricted:"true"` - ConnectionSecurity *string `restricted:"true"` - SendPushNotifications *bool - PushNotificationServer *string - PushNotificationContents *string - EnableEmailBatching *bool - EmailBatchingBufferSize *int - EmailBatchingInterval *int - EnablePreviewModeBanner *bool - SkipServerCertificateVerification *bool `restricted:"true"` - EmailNotificationContentsType *string - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string + EnableSignUpWithEmail *bool `access:"authentication"` + EnableSignInWithEmail *bool `access:"authentication"` + EnableSignInWithUsername *bool `access:"authentication"` + SendEmailNotifications *bool `access:"site"` + UseChannelInEmailNotifications *bool `access:"experimental"` + RequireEmailVerification *bool `access:"authentication"` + FeedbackName *string `access:"site"` + FeedbackEmail *string `access:"site,cloud_restrictable"` + ReplyToAddress *string `access:"site,cloud_restrictable"` + FeedbackOrganization *string `access:"site"` + EnableSMTPAuth *bool `access:"environment,write_restrictable,cloud_restrictable"` + SMTPUsername *string `access:"environment,write_restrictable,cloud_restrictable"` + SMTPPassword *string `access:"environment,write_restrictable,cloud_restrictable"` + SMTPServer *string `access:"environment,write_restrictable,cloud_restrictable"` + SMTPPort *string `access:"environment,write_restrictable,cloud_restrictable"` + SMTPServerTimeout *int `access:"cloud_restrictable"` + ConnectionSecurity *string `access:"environment,write_restrictable,cloud_restrictable"` + SendPushNotifications *bool `access:"environment"` + PushNotificationServer *string `access:"environment"` + PushNotificationContents *string `access:"site"` + PushNotificationBuffer *int + EnableEmailBatching *bool `access:"site"` + EmailBatchingBufferSize *int `access:"experimental"` + EmailBatchingInterval *int `access:"experimental"` + EnablePreviewModeBanner *bool `access:"site"` + SkipServerCertificateVerification *bool `access:"environment,write_restrictable,cloud_restrictable"` + EmailNotificationContentsType *string `access:"site"` + LoginButtonColor *string `access:"experimental"` + LoginButtonBorderColor *string `access:"experimental"` + LoginButtonTextColor *string `access:"experimental"` } func (s *EmailSettings) SetDefaults(isUpdate bool) { @@ -1227,11 +1530,15 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) { } if s.SMTPServer == nil || len(*s.SMTPServer) == 0 { - s.SMTPServer = NewString("dockerhost") + s.SMTPServer = NewString("localhost") } if s.SMTPPort == nil || len(*s.SMTPPort) == 0 { - s.SMTPPort = NewString("2500") + s.SMTPPort = NewString("10025") + } + + if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 { + s.SMTPServerTimeout = NewInt(10) } if s.ConnectionSecurity == nil || *s.ConnectionSecurity == CONN_SECURITY_PLAIN { @@ -1251,7 +1558,11 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) { } if s.PushNotificationContents == nil { - s.PushNotificationContents = NewString(GENERIC_NOTIFICATION) + s.PushNotificationContents = NewString(FULL_NOTIFICATION) + } + + if s.PushNotificationBuffer == nil { + s.PushNotificationBuffer = NewInt(1000) } if s.EnableEmailBatching == nil { @@ -1304,13 +1615,13 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) { } type RateLimitSettings struct { - Enable *bool `restricted:"true"` - PerSec *int `restricted:"true"` - MaxBurst *int 
`restricted:"true"` - MemoryStoreSize *int `restricted:"true"` - VaryByRemoteAddr *bool `restricted:"true"` - VaryByUser *bool `restricted:"true"` - VaryByHeader string `restricted:"true"` + Enable *bool `access:"environment,write_restrictable,cloud_restrictable"` + PerSec *int `access:"environment,write_restrictable,cloud_restrictable"` + MaxBurst *int `access:"environment,write_restrictable,cloud_restrictable"` + MemoryStoreSize *int `access:"environment,write_restrictable,cloud_restrictable"` + VaryByRemoteAddr *bool `access:"environment,write_restrictable,cloud_restrictable"` + VaryByUser *bool `access:"environment,write_restrictable,cloud_restrictable"` + VaryByHeader string `access:"environment,write_restrictable,cloud_restrictable"` } func (s *RateLimitSettings) SetDefaults() { @@ -1340,8 +1651,8 @@ func (s *RateLimitSettings) SetDefaults() { } type PrivacySettings struct { - ShowEmailAddress *bool - ShowFullName *bool + ShowEmailAddress *bool `access:"site"` + ShowFullName *bool `access:"site"` } func (s *PrivacySettings) setDefaults() { @@ -1355,14 +1666,15 @@ func (s *PrivacySettings) setDefaults() { } type SupportSettings struct { - TermsOfServiceLink *string `restricted:"true"` - PrivacyPolicyLink *string `restricted:"true"` - AboutLink *string `restricted:"true"` - HelpLink *string `restricted:"true"` - ReportAProblemLink *string `restricted:"true"` - SupportEmail *string - CustomTermsOfServiceEnabled *bool - CustomTermsOfServiceReAcceptancePeriod *int + TermsOfServiceLink *string `access:"site,write_restrictable,cloud_restrictable"` + PrivacyPolicyLink *string `access:"site,write_restrictable,cloud_restrictable"` + AboutLink *string `access:"site,write_restrictable,cloud_restrictable"` + HelpLink *string `access:"site,write_restrictable,cloud_restrictable"` + ReportAProblemLink *string `access:"site,write_restrictable,cloud_restrictable"` + SupportEmail *string `access:"site"` + CustomTermsOfServiceEnabled *bool `access:"compliance"` + CustomTermsOfServiceReAcceptancePeriod *int `access:"compliance"` + EnableAskCommunityLink *bool `access:"site"` } func (s *SupportSettings) SetDefaults() { @@ -1417,14 +1729,23 @@ func (s *SupportSettings) SetDefaults() { if s.CustomTermsOfServiceReAcceptancePeriod == nil { s.CustomTermsOfServiceReAcceptancePeriod = NewInt(SUPPORT_SETTINGS_DEFAULT_RE_ACCEPTANCE_PERIOD) } + + if s.EnableAskCommunityLink == nil { + s.EnableAskCommunityLink = NewBool(true) + } } type AnnouncementSettings struct { - EnableBanner *bool - BannerText *string - BannerColor *string - BannerTextColor *string - AllowBannerDismissal *bool + EnableBanner *bool `access:"site"` + BannerText *string `access:"site"` + BannerColor *string `access:"site"` + BannerTextColor *string `access:"site"` + AllowBannerDismissal *bool `access:"site"` + AdminNoticesEnabled *bool `access:"site"` + UserNoticesEnabled *bool `access:"site"` + NoticesURL *string `access:"site,write_restrictable"` + NoticesFetchFrequency *int `access:"site,write_restrictable"` + NoticesSkipCache *bool `access:"site,write_restrictable"` } func (s *AnnouncementSettings) SetDefaults() { @@ -1447,12 +1768,30 @@ func (s *AnnouncementSettings) SetDefaults() { if s.AllowBannerDismissal == nil { s.AllowBannerDismissal = NewBool(true) } + + if s.AdminNoticesEnabled == nil { + s.AdminNoticesEnabled = NewBool(true) + } + + if s.UserNoticesEnabled == nil { + s.UserNoticesEnabled = NewBool(true) + } + if s.NoticesURL == nil { + s.NoticesURL = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_JSON_URL) + } + if 
s.NoticesSkipCache == nil { + s.NoticesSkipCache = NewBool(false) + } + if s.NoticesFetchFrequency == nil { + s.NoticesFetchFrequency = NewInt(ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_FETCH_FREQUENCY_SECONDS) + } + } type ThemeSettings struct { - EnableThemeSelection *bool - DefaultTheme *string - AllowCustomThemes *bool + EnableThemeSelection *bool `access:"experimental"` + DefaultTheme *string `access:"experimental"` + AllowCustomThemes *bool `access:"experimental"` AllowedThemes []string } @@ -1475,37 +1814,38 @@ func (s *ThemeSettings) SetDefaults() { } type TeamSettings struct { - SiteName *string - MaxUsersPerTeam *int - DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used. - EnableUserCreation *bool - EnableOpenServer *bool - EnableUserDeactivation *bool - RestrictCreationToDomains *string - EnableCustomBrand *bool - CustomBrandText *string - CustomDescriptionText *string - RestrictDirectMessage *string - DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used. - DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used. - EnableXToLeaveChannelsFromLHS *bool - UserStatusAwayTimeout *int64 - MaxChannelsPerTeam *int64 - MaxNotificationsPerChannel *int64 - EnableConfirmNotificationsToChannel *bool - TeammateNameDisplay *string - ExperimentalViewArchivedChannels *bool - ExperimentalEnableAutomaticReplies *bool - ExperimentalHideTownSquareinLHS *bool - ExperimentalTownSquareIsReadOnly *bool - ExperimentalPrimaryTeam *string - ExperimentalDefaultChannels []string + SiteName *string `access:"site"` + MaxUsersPerTeam *int `access:"site"` + DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used. 
+ EnableUserCreation *bool `access:"authentication"` + EnableOpenServer *bool `access:"authentication"` + EnableUserDeactivation *bool `access:"experimental"` + RestrictCreationToDomains *string `access:"authentication"` + EnableCustomBrand *bool `access:"site"` + CustomBrandText *string `access:"site"` + CustomDescriptionText *string `access:"site"` + RestrictDirectMessage *string `access:"site"` + DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used. + DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used. 
+ EnableXToLeaveChannelsFromLHS *bool `access:"experimental"` + UserStatusAwayTimeout *int64 `access:"experimental"` + MaxChannelsPerTeam *int64 `access:"site"` + MaxNotificationsPerChannel *int64 `access:"environment"` + EnableConfirmNotificationsToChannel *bool `access:"site"` + TeammateNameDisplay *string `access:"site"` + ExperimentalViewArchivedChannels *bool `access:"experimental,site"` + ExperimentalEnableAutomaticReplies *bool `access:"experimental"` + ExperimentalHideTownSquareinLHS *bool `access:"experimental"` + ExperimentalTownSquareIsReadOnly *bool `access:"experimental"` + LockTeammateNameDisplay *bool `access:"site"` + ExperimentalPrimaryTeam *string `access:"experimental"` + ExperimentalDefaultChannels []string `access:"experimental"` } func (s *TeamSettings) SetDefaults() { @@ -1648,64 +1988,74 @@ func (s *TeamSettings) SetDefaults() { } if s.ExperimentalViewArchivedChannels == nil { - s.ExperimentalViewArchivedChannels = NewBool(false) + s.ExperimentalViewArchivedChannels = NewBool(true) + } + + if s.LockTeammateNameDisplay == nil { + s.LockTeammateNameDisplay = NewBool(false) } } type ClientRequirements struct { - AndroidLatestVersion string `restricted:"true"` - AndroidMinVersion string `restricted:"true"` - DesktopLatestVersion string `restricted:"true"` - DesktopMinVersion string `restricted:"true"` - IosLatestVersion string `restricted:"true"` - IosMinVersion string `restricted:"true"` + AndroidLatestVersion string `access:"write_restrictable,cloud_restrictable"` + AndroidMinVersion string `access:"write_restrictable,cloud_restrictable"` + DesktopLatestVersion string `access:"write_restrictable,cloud_restrictable"` + DesktopMinVersion string `access:"write_restrictable,cloud_restrictable"` + IosLatestVersion string `access:"write_restrictable,cloud_restrictable"` + IosMinVersion string `access:"write_restrictable,cloud_restrictable"` } type LdapSettings struct { // Basic - Enable *bool - EnableSync *bool - LdapServer *string - LdapPort *int - ConnectionSecurity *string - BaseDN *string - BindUsername *string - BindPassword *string + Enable *bool `access:"authentication"` + EnableSync *bool `access:"authentication"` + LdapServer *string `access:"authentication"` + LdapPort *int `access:"authentication"` + ConnectionSecurity *string `access:"authentication"` + BaseDN *string `access:"authentication"` + BindUsername *string `access:"authentication"` + BindPassword *string `access:"authentication"` // Filtering - UserFilter *string - GroupFilter *string + UserFilter *string `access:"authentication"` + GroupFilter *string `access:"authentication"` + GuestFilter *string `access:"authentication"` + EnableAdminFilter *bool + AdminFilter *string // Group Mapping - GroupDisplayNameAttribute *string - GroupIdAttribute *string + GroupDisplayNameAttribute *string `access:"authentication"` + GroupIdAttribute *string `access:"authentication"` // User Mapping - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - IdAttribute *string - PositionAttribute *string - LoginIdAttribute *string + FirstNameAttribute *string `access:"authentication"` + LastNameAttribute *string `access:"authentication"` + EmailAttribute *string `access:"authentication"` + UsernameAttribute *string `access:"authentication"` + NicknameAttribute *string `access:"authentication"` + IdAttribute *string `access:"authentication"` + PositionAttribute *string `access:"authentication"` + LoginIdAttribute *string 
`access:"authentication"` + PictureAttribute *string `access:"authentication"` // Synchronization - SyncIntervalMinutes *int + SyncIntervalMinutes *int `access:"authentication"` // Advanced - SkipCertificateVerification *bool - QueryTimeout *int - MaxPageSize *int + SkipCertificateVerification *bool `access:"authentication"` + PublicCertificateFile *string `access:"authentication"` + PrivateKeyFile *string `access:"authentication"` + QueryTimeout *int `access:"authentication"` + MaxPageSize *int `access:"authentication"` // Customization - LoginFieldName *string + LoginFieldName *string `access:"authentication"` - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string + LoginButtonColor *string `access:"authentication"` + LoginButtonBorderColor *string `access:"authentication"` + LoginButtonTextColor *string `access:"authentication"` - Trace *bool + Trace *bool `access:"authentication"` } func (s *LdapSettings) SetDefaults() { @@ -1718,6 +2068,10 @@ func (s *LdapSettings) SetDefaults() { s.EnableSync = NewBool(*s.Enable) } + if s.EnableAdminFilter == nil { + s.EnableAdminFilter = NewBool(false) + } + if s.LdapServer == nil { s.LdapServer = NewString("") } @@ -1730,6 +2084,14 @@ func (s *LdapSettings) SetDefaults() { s.ConnectionSecurity = NewString("") } + if s.PublicCertificateFile == nil { + s.PublicCertificateFile = NewString("") + } + + if s.PrivateKeyFile == nil { + s.PrivateKeyFile = NewString("") + } + if s.BaseDN == nil { s.BaseDN = NewString("") } @@ -1746,6 +2108,14 @@ func (s *LdapSettings) SetDefaults() { s.UserFilter = NewString("") } + if s.GuestFilter == nil { + s.GuestFilter = NewString("") + } + + if s.AdminFilter == nil { + s.AdminFilter = NewString("") + } + if s.GroupFilter == nil { s.GroupFilter = NewString("") } @@ -1786,6 +2156,10 @@ func (s *LdapSettings) SetDefaults() { s.PositionAttribute = NewString(LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) } + if s.PictureAttribute == nil { + s.PictureAttribute = NewString(LDAP_SETTINGS_DEFAULT_PICTURE_ATTRIBUTE) + } + // For those upgrading to the version when LoginIdAttribute was added // they need IdAttribute == LoginIdAttribute not to break if s.LoginIdAttribute == nil { @@ -1830,9 +2204,9 @@ func (s *LdapSettings) SetDefaults() { } type ComplianceSettings struct { - Enable *bool - Directory *string - EnableDaily *bool + Enable *bool `access:"compliance"` + Directory *string `access:"compliance"` + EnableDaily *bool `access:"compliance"` } func (s *ComplianceSettings) SetDefaults() { @@ -1850,9 +2224,9 @@ func (s *ComplianceSettings) SetDefaults() { } type LocalizationSettings struct { - DefaultServerLocale *string - DefaultClientLocale *string - AvailableLocales *string + DefaultServerLocale *string `access:"site"` + DefaultClientLocale *string `access:"site"` + AvailableLocales *string `access:"site"` } func (s *LocalizationSettings) SetDefaults() { @@ -1871,40 +2245,49 @@ func (s *LocalizationSettings) SetDefaults() { type SamlSettings struct { // Basic - Enable *bool - EnableSyncWithLdap *bool - EnableSyncWithLdapIncludeAuth *bool + Enable *bool `access:"authentication"` + EnableSyncWithLdap *bool `access:"authentication"` + EnableSyncWithLdapIncludeAuth *bool `access:"authentication"` + IgnoreGuestsLdapSync *bool `access:"authentication"` + + Verify *bool `access:"authentication"` + Encrypt *bool `access:"authentication"` + SignRequest *bool `access:"authentication"` - Verify *bool - Encrypt *bool - SignRequest *bool + IdpUrl *string `access:"authentication"` + IdpDescriptorUrl *string 
`access:"authentication"` + IdpMetadataUrl *string `access:"authentication"` + ServiceProviderIdentifier *string `access:"authentication"` + AssertionConsumerServiceURL *string `access:"authentication"` - IdpUrl *string - IdpDescriptorUrl *string - AssertionConsumerServiceURL *string + SignatureAlgorithm *string `access:"authentication"` + CanonicalAlgorithm *string `access:"authentication"` - ScopingIDPProviderId *string - ScopingIDPName *string + ScopingIDPProviderId *string `access:"authentication"` + ScopingIDPName *string `access:"authentication"` - IdpCertificateFile *string - PublicCertificateFile *string - PrivateKeyFile *string + IdpCertificateFile *string `access:"authentication"` + PublicCertificateFile *string `access:"authentication"` + PrivateKeyFile *string `access:"authentication"` // User Mapping - IdAttribute *string - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - LocaleAttribute *string - PositionAttribute *string + IdAttribute *string `access:"authentication"` + GuestAttribute *string `access:"authentication"` + EnableAdminAttribute *bool + AdminAttribute *string + FirstNameAttribute *string `access:"authentication"` + LastNameAttribute *string `access:"authentication"` + EmailAttribute *string `access:"authentication"` + UsernameAttribute *string `access:"authentication"` + NicknameAttribute *string `access:"authentication"` + LocaleAttribute *string `access:"authentication"` + PositionAttribute *string `access:"authentication"` - LoginButtonText *string + LoginButtonText *string `access:"authentication"` - LoginButtonColor *string - LoginButtonBorderColor *string - LoginButtonTextColor *string + LoginButtonColor *string `access:"authentication"` + LoginButtonBorderColor *string `access:"authentication"` + LoginButtonTextColor *string `access:"authentication"` } func (s *SamlSettings) SetDefaults() { @@ -1920,6 +2303,14 @@ func (s *SamlSettings) SetDefaults() { s.EnableSyncWithLdapIncludeAuth = NewBool(false) } + if s.IgnoreGuestsLdapSync == nil { + s.IgnoreGuestsLdapSync = NewBool(false) + } + + if s.EnableAdminAttribute == nil { + s.EnableAdminAttribute = NewBool(false) + } + if s.Verify == nil { s.Verify = NewBool(true) } @@ -1932,6 +2323,14 @@ func (s *SamlSettings) SetDefaults() { s.SignRequest = NewBool(false) } + if s.SignatureAlgorithm == nil { + s.SignatureAlgorithm = NewString(SAML_SETTINGS_DEFAULT_SIGNATURE_ALGORITHM) + } + + if s.CanonicalAlgorithm == nil { + s.CanonicalAlgorithm = NewString(SAML_SETTINGS_DEFAULT_CANONICAL_ALGORITHM) + } + if s.IdpUrl == nil { s.IdpUrl = NewString("") } @@ -1940,6 +2339,18 @@ func (s *SamlSettings) SetDefaults() { s.IdpDescriptorUrl = NewString("") } + if s.ServiceProviderIdentifier == nil { + if s.IdpDescriptorUrl != nil { + s.ServiceProviderIdentifier = NewString(*s.IdpDescriptorUrl) + } else { + s.ServiceProviderIdentifier = NewString("") + } + } + + if s.IdpMetadataUrl == nil { + s.IdpMetadataUrl = NewString("") + } + if s.IdpCertificateFile == nil { s.IdpCertificateFile = NewString("") } @@ -1972,6 +2383,12 @@ func (s *SamlSettings) SetDefaults() { s.IdAttribute = NewString(SAML_SETTINGS_DEFAULT_ID_ATTRIBUTE) } + if s.GuestAttribute == nil { + s.GuestAttribute = NewString(SAML_SETTINGS_DEFAULT_GUEST_ATTRIBUTE) + } + if s.AdminAttribute == nil { + s.AdminAttribute = NewString(SAML_SETTINGS_DEFAULT_ADMIN_ATTRIBUTE) + } if s.FirstNameAttribute == nil { s.FirstNameAttribute = NewString(SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) } 
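For reference, the recurring idiom in this hunk (and in every SetDefaults method the patch touches) is the nil-pointer default: a field is populated only when the loaded config left it nil, and the isUpdate flag lets upgraded installations keep the previous default while fresh installs pick up the new one (as with ExtendSessionLengthWithActivity and SessionLengthWebInDays above). The following is a minimal, self-contained sketch of that pattern under stated assumptions: SampleSettings and its fields are hypothetical, and NewBool/NewInt/NewString are re-declared here only so the sketch compiles on its own (the real helpers live elsewhere in the model package).

// Minimal sketch of the pointer-default pattern used throughout these
// SetDefaults methods. SampleSettings is a hypothetical struct used only
// to illustrate the idiom, not a real Mattermost settings block.
package main

import "fmt"

func NewBool(b bool) *bool       { return &b }
func NewInt(n int) *int          { return &n }
func NewString(s string) *string { return &s }

type SampleSettings struct {
	EnableFeature *bool   `access:"experimental"`
	SessionDays   *int    `access:"experimental"`
	ButtonColor   *string `access:"experimental"`
}

// SetDefaults only fills fields that are still nil, so values loaded from an
// existing config are never overwritten. The isUpdate flag keeps the old
// default for upgraded installations while fresh installs get the new one.
func (s *SampleSettings) SetDefaults(isUpdate bool) {
	if s.EnableFeature == nil {
		// New behaviour must be manually enabled on existing installations.
		s.EnableFeature = NewBool(!isUpdate)
	}

	if s.SessionDays == nil {
		if isUpdate {
			s.SessionDays = NewInt(180)
		} else {
			s.SessionDays = NewInt(30)
		}
	}

	if s.ButtonColor == nil {
		s.ButtonColor = NewString("")
	}
}

func main() {
	fresh := &SampleSettings{}
	fresh.SetDefaults(false)
	fmt.Println(*fresh.EnableFeature, *fresh.SessionDays) // true 30

	upgraded := &SampleSettings{SessionDays: NewInt(365)}
	upgraded.SetDefaults(true)
	fmt.Println(*upgraded.EnableFeature, *upgraded.SessionDays) // false 365
}

Because defaults are applied this way, adding a new setting in the diff only requires a struct field plus one nil check; existing config files remain valid without migration.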
@@ -2014,9 +2431,10 @@ func (s *SamlSettings) SetDefaults() { } type NativeAppSettings struct { - AppDownloadLink *string `restricted:"true"` - AndroidAppDownloadLink *string `restricted:"true"` - IosAppDownloadLink *string `restricted:"true"` + AppCustomURLSchemes []string `access:"site,write_restrictable,cloud_restrictable"` + AppDownloadLink *string `access:"site,write_restrictable,cloud_restrictable"` + AndroidAppDownloadLink *string `access:"site,write_restrictable,cloud_restrictable"` + IosAppDownloadLink *string `access:"site,write_restrictable,cloud_restrictable"` } func (s *NativeAppSettings) SetDefaults() { @@ -2031,30 +2449,34 @@ func (s *NativeAppSettings) SetDefaults() { if s.IosAppDownloadLink == nil { s.IosAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK) } + + if s.AppCustomURLSchemes == nil { + s.AppCustomURLSchemes = GetDefaultAppCustomURLSchemes() + } } type ElasticsearchSettings struct { - ConnectionUrl *string `restricted:"true"` - Username *string `restricted:"true"` - Password *string `restricted:"true"` - EnableIndexing *bool `restricted:"true"` - EnableSearching *bool `restricted:"true"` - EnableAutocomplete *bool `restricted:"true"` - Sniff *bool `restricted:"true"` - PostIndexReplicas *int `restricted:"true"` - PostIndexShards *int `restricted:"true"` - ChannelIndexReplicas *int `restricted:"true"` - ChannelIndexShards *int `restricted:"true"` - UserIndexReplicas *int `restricted:"true"` - UserIndexShards *int `restricted:"true"` - AggregatePostsAfterDays *int `restricted:"true"` - PostsAggregatorJobStartTime *string `restricted:"true"` - IndexPrefix *string `restricted:"true"` - LiveIndexingBatchSize *int `restricted:"true"` - BulkIndexingTimeWindowSeconds *int `restricted:"true"` - RequestTimeoutSeconds *int `restricted:"true"` - SkipTLSVerification *bool `restricted:"true"` - Trace *string `restricted:"true"` + ConnectionUrl *string `access:"environment,write_restrictable,cloud_restrictable"` + Username *string `access:"environment,write_restrictable,cloud_restrictable"` + Password *string `access:"environment,write_restrictable,cloud_restrictable"` + EnableIndexing *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableSearching *bool `access:"environment,write_restrictable,cloud_restrictable"` + EnableAutocomplete *bool `access:"environment,write_restrictable,cloud_restrictable"` + Sniff *bool `access:"environment,write_restrictable,cloud_restrictable"` + PostIndexReplicas *int `access:"environment,write_restrictable,cloud_restrictable"` + PostIndexShards *int `access:"environment,write_restrictable,cloud_restrictable"` + ChannelIndexReplicas *int `access:"environment,write_restrictable,cloud_restrictable"` + ChannelIndexShards *int `access:"environment,write_restrictable,cloud_restrictable"` + UserIndexReplicas *int `access:"environment,write_restrictable,cloud_restrictable"` + UserIndexShards *int `access:"environment,write_restrictable,cloud_restrictable"` + AggregatePostsAfterDays *int `access:"environment,write_restrictable,cloud_restrictable"` + PostsAggregatorJobStartTime *string `access:"environment,write_restrictable,cloud_restrictable"` + IndexPrefix *string `access:"environment,write_restrictable,cloud_restrictable"` + LiveIndexingBatchSize *int `access:"environment,write_restrictable,cloud_restrictable"` + BulkIndexingTimeWindowSeconds *int `access:"environment,write_restrictable,cloud_restrictable"` + RequestTimeoutSeconds *int `access:"environment,write_restrictable,cloud_restrictable"` + 
SkipTLSVerification *bool `access:"environment,write_restrictable,cloud_restrictable"` + Trace *string `access:"environment,write_restrictable,cloud_restrictable"` } func (s *ElasticsearchSettings) SetDefaults() { @@ -2143,12 +2565,42 @@ func (s *ElasticsearchSettings) SetDefaults() { } } +type BleveSettings struct { + IndexDir *string `access:"experimental"` + EnableIndexing *bool `access:"experimental"` + EnableSearching *bool `access:"experimental"` + EnableAutocomplete *bool `access:"experimental"` + BulkIndexingTimeWindowSeconds *int `access:"experimental"` +} + +func (bs *BleveSettings) SetDefaults() { + if bs.IndexDir == nil { + bs.IndexDir = NewString(BLEVE_SETTINGS_DEFAULT_INDEX_DIR) + } + + if bs.EnableIndexing == nil { + bs.EnableIndexing = NewBool(false) + } + + if bs.EnableSearching == nil { + bs.EnableSearching = NewBool(false) + } + + if bs.EnableAutocomplete == nil { + bs.EnableAutocomplete = NewBool(false) + } + + if bs.BulkIndexingTimeWindowSeconds == nil { + bs.BulkIndexingTimeWindowSeconds = NewInt(BLEVE_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS) + } +} + type DataRetentionSettings struct { - EnableMessageDeletion *bool - EnableFileDeletion *bool - MessageRetentionDays *int - FileRetentionDays *int - DeletionJobStartTime *string + EnableMessageDeletion *bool `access:"compliance"` + EnableFileDeletion *bool `access:"compliance"` + MessageRetentionDays *int `access:"compliance"` + FileRetentionDays *int `access:"compliance"` + DeletionJobStartTime *string `access:"compliance"` } func (s *DataRetentionSettings) SetDefaults() { @@ -2174,8 +2626,8 @@ func (s *DataRetentionSettings) SetDefaults() { } type JobSettings struct { - RunJobs *bool `restricted:"true"` - RunScheduler *bool `restricted:"true"` + RunJobs *bool `access:"write_restrictable,cloud_restrictable"` + RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` } func (s *JobSettings) SetDefaults() { @@ -2188,19 +2640,35 @@ func (s *JobSettings) SetDefaults() { } } +type CloudSettings struct { + CWSUrl *string `access:"environment,write_restrictable"` +} + +func (s *CloudSettings) SetDefaults() { + if s.CWSUrl == nil { + s.CWSUrl = NewString(CLOUD_SETTINGS_DEFAULT_CWS_URL) + } +} + type PluginState struct { Enable bool } type PluginSettings struct { - Enable *bool - EnableUploads *bool `restricted:"true"` - AllowInsecureDownloadUrl *bool `restricted:"true"` - EnableHealthCheck *bool `restricted:"true"` - Directory *string `restricted:"true"` - ClientDirectory *string `restricted:"true"` - Plugins map[string]map[string]interface{} - PluginStates map[string]*PluginState + Enable *bool `access:"plugins,write_restrictable"` + EnableUploads *bool `access:"plugins,write_restrictable,cloud_restrictable"` + AllowInsecureDownloadUrl *bool `access:"plugins,write_restrictable,cloud_restrictable"` + EnableHealthCheck *bool `access:"plugins,write_restrictable,cloud_restrictable"` + Directory *string `access:"plugins,write_restrictable,cloud_restrictable"` + ClientDirectory *string `access:"plugins,write_restrictable,cloud_restrictable"` + Plugins map[string]map[string]interface{} `access:"plugins"` + PluginStates map[string]*PluginState `access:"plugins"` + EnableMarketplace *bool `access:"plugins,write_restrictable,cloud_restrictable"` + EnableRemoteMarketplace *bool `access:"plugins,write_restrictable,cloud_restrictable"` + AutomaticPrepackagedPlugins *bool `access:"plugins,write_restrictable,cloud_restrictable"` + RequirePluginSignature *bool `access:"plugins,write_restrictable,cloud_restrictable"` + 
MarketplaceUrl *string `access:"plugins,write_restrictable,cloud_restrictable"` + SignaturePublicKeyFiles []string `access:"plugins,write_restrictable,cloud_restrictable"` } func (s *PluginSettings) SetDefaults(ls LogSettings) { @@ -2220,22 +2688,14 @@ func (s *PluginSettings) SetDefaults(ls LogSettings) { s.EnableHealthCheck = NewBool(true) } - if s.Directory == nil { + if s.Directory == nil || *s.Directory == "" { s.Directory = NewString(PLUGIN_SETTINGS_DEFAULT_DIRECTORY) } - if *s.Directory == "" { - *s.Directory = PLUGIN_SETTINGS_DEFAULT_DIRECTORY - } - - if s.ClientDirectory == nil { + if s.ClientDirectory == nil || *s.ClientDirectory == "" { s.ClientDirectory = NewString(PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY) } - if *s.ClientDirectory == "" { - *s.ClientDirectory = PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY - } - if s.Plugins == nil { s.Plugins = make(map[string]map[string]interface{}) } @@ -2248,13 +2708,48 @@ func (s *PluginSettings) SetDefaults(ls LogSettings) { // Enable the NPS plugin by default if diagnostics are enabled s.PluginStates["com.mattermost.nps"] = &PluginState{Enable: ls.EnableDiagnostics == nil || *ls.EnableDiagnostics} } + + if s.PluginStates["com.mattermost.plugin-incident-management"] == nil && BuildEnterpriseReady == "true" { + // Enable the incident management plugin by default + s.PluginStates["com.mattermost.plugin-incident-management"] = &PluginState{Enable: true} + } + + if s.PluginStates["com.mattermost.plugin-channel-export"] == nil && BuildEnterpriseReady == "true" { + // Enable the channel export plugin by default + s.PluginStates["com.mattermost.plugin-channel-export"] = &PluginState{Enable: true} + } + + if s.EnableMarketplace == nil { + s.EnableMarketplace = NewBool(PLUGIN_SETTINGS_DEFAULT_ENABLE_MARKETPLACE) + } + + if s.EnableRemoteMarketplace == nil { + s.EnableRemoteMarketplace = NewBool(true) + } + + if s.AutomaticPrepackagedPlugins == nil { + s.AutomaticPrepackagedPlugins = NewBool(true) + } + + if s.MarketplaceUrl == nil || *s.MarketplaceUrl == "" || *s.MarketplaceUrl == PLUGIN_SETTINGS_OLD_MARKETPLACE_URL { + s.MarketplaceUrl = NewString(PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL) + } + + if s.RequirePluginSignature == nil { + s.RequirePluginSignature = NewBool(false) + } + + if s.SignaturePublicKeyFiles == nil { + s.SignaturePublicKeyFiles = []string{} + } } type GlobalRelayMessageExportSettings struct { - CustomerType *string // must be either A9 or A10, dictates SMTP server url - SmtpUsername *string - SmtpPassword *string - EmailAddress *string // the address to send messages to + CustomerType *string `access:"compliance"` // must be either A9 or A10, dictates SMTP server url + SmtpUsername *string `access:"compliance"` + SmtpPassword *string `access:"compliance"` + EmailAddress *string `access:"compliance"` // the address to send messages to + SMTPServerTimeout *int `access:"compliance"` } func (s *GlobalRelayMessageExportSettings) SetDefaults() { @@ -2270,17 +2765,21 @@ func (s *GlobalRelayMessageExportSettings) SetDefaults() { if s.EmailAddress == nil { s.EmailAddress = NewString("") } + if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 { + s.SMTPServerTimeout = NewInt(1800) + } } type MessageExportSettings struct { - EnableExport *bool - ExportFormat *string - DailyRunTime *string - ExportFromTimestamp *int64 - BatchSize *int + EnableExport *bool `access:"compliance"` + ExportFormat *string `access:"compliance"` + DailyRunTime *string `access:"compliance"` + ExportFromTimestamp *int64 `access:"compliance"` + BatchSize *int 
`access:"compliance"` + DownloadExportResults *bool `access:"compliance"` // formatter-specific settings - these are only expected to be non-nil if ExportFormat is set to the associated format - GlobalRelaySettings *GlobalRelayMessageExportSettings + GlobalRelaySettings *GlobalRelayMessageExportSettings `access:"compliance"` } func (s *MessageExportSettings) SetDefaults() { @@ -2288,6 +2787,10 @@ func (s *MessageExportSettings) SetDefaults() { s.EnableExport = NewBool(false) } + if s.DownloadExportResults == nil { + s.DownloadExportResults = NewBool(false) + } + if s.ExportFormat == nil { s.ExportFormat = NewString(COMPLIANCE_EXPORT_TYPE_ACTIANCE) } @@ -2311,8 +2814,8 @@ func (s *MessageExportSettings) SetDefaults() { } type DisplaySettings struct { - CustomUrlSchemes []string - ExperimentalTimezone *bool + CustomUrlSchemes []string `access:"site"` + ExperimentalTimezone *bool `access:"experimental"` } func (s *DisplaySettings) SetDefaults() { @@ -2322,87 +2825,184 @@ func (s *DisplaySettings) SetDefaults() { } if s.ExperimentalTimezone == nil { - s.ExperimentalTimezone = NewBool(false) + s.ExperimentalTimezone = NewBool(true) + } +} + +type GuestAccountsSettings struct { + Enable *bool `access:"authentication"` + AllowEmailAccounts *bool `access:"authentication"` + EnforceMultifactorAuthentication *bool `access:"authentication"` + RestrictCreationToDomains *string `access:"authentication"` +} + +func (s *GuestAccountsSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.AllowEmailAccounts == nil { + s.AllowEmailAccounts = NewBool(true) + } + + if s.EnforceMultifactorAuthentication == nil { + s.EnforceMultifactorAuthentication = NewBool(false) + } + + if s.RestrictCreationToDomains == nil { + s.RestrictCreationToDomains = NewString("") } } type ImageProxySettings struct { - Enable *bool - ImageProxyType *string - RemoteImageProxyURL *string - RemoteImageProxyOptions *string + Enable *bool `access:"environment"` + ImageProxyType *string `access:"environment"` + RemoteImageProxyURL *string `access:"environment"` + RemoteImageProxyOptions *string `access:"environment"` } -func (ips *ImageProxySettings) SetDefaults(ss ServiceSettings) { - if ips.Enable == nil { +func (s *ImageProxySettings) SetDefaults(ss ServiceSettings) { + if s.Enable == nil { if ss.DEPRECATED_DO_NOT_USE_ImageProxyType == nil || *ss.DEPRECATED_DO_NOT_USE_ImageProxyType == "" { - ips.Enable = NewBool(false) + s.Enable = NewBool(false) } else { - ips.Enable = NewBool(true) + s.Enable = NewBool(true) } } - if ips.ImageProxyType == nil { + if s.ImageProxyType == nil { if ss.DEPRECATED_DO_NOT_USE_ImageProxyType == nil || *ss.DEPRECATED_DO_NOT_USE_ImageProxyType == "" { - ips.ImageProxyType = NewString(IMAGE_PROXY_TYPE_LOCAL) + s.ImageProxyType = NewString(IMAGE_PROXY_TYPE_LOCAL) } else { - ips.ImageProxyType = ss.DEPRECATED_DO_NOT_USE_ImageProxyType + s.ImageProxyType = ss.DEPRECATED_DO_NOT_USE_ImageProxyType } } - if ips.RemoteImageProxyURL == nil { + if s.RemoteImageProxyURL == nil { if ss.DEPRECATED_DO_NOT_USE_ImageProxyURL == nil { - ips.RemoteImageProxyURL = NewString("") + s.RemoteImageProxyURL = NewString("") } else { - ips.RemoteImageProxyURL = ss.DEPRECATED_DO_NOT_USE_ImageProxyURL + s.RemoteImageProxyURL = ss.DEPRECATED_DO_NOT_USE_ImageProxyURL } } - if ips.RemoteImageProxyOptions == nil { + if s.RemoteImageProxyOptions == nil { if ss.DEPRECATED_DO_NOT_USE_ImageProxyOptions == nil { - ips.RemoteImageProxyOptions = NewString("") + s.RemoteImageProxyOptions = NewString("") } else { - 
ips.RemoteImageProxyOptions = ss.DEPRECATED_DO_NOT_USE_ImageProxyOptions + s.RemoteImageProxyOptions = ss.DEPRECATED_DO_NOT_USE_ImageProxyOptions } } } +// ImportSettings defines configuration settings for file imports. +type ImportSettings struct { + // The directory where to store the imported files. + Directory *string + // The number of days to retain the imported files before deleting them. + RetentionDays *int +} + +func (s *ImportSettings) isValid() *AppError { + if *s.Directory == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.import.directory.app_error", nil, "", http.StatusBadRequest) + } + + if *s.RetentionDays <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.import.retention_days_too_low.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +// SetDefaults applies the default settings to the struct. +func (s *ImportSettings) SetDefaults() { + if s.Directory == nil || *s.Directory == "" { + s.Directory = NewString(IMPORT_SETTINGS_DEFAULT_DIRECTORY) + } + + if s.RetentionDays == nil { + s.RetentionDays = NewInt(IMPORT_SETTINGS_DEFAULT_RETENTION_DAYS) + } +} + type ConfigFunc func() *Config +const ConfigAccessTagType = "access" +const ConfigAccessTagWriteRestrictable = "write_restrictable" +const ConfigAccessTagCloudRestrictable = "cloud_restrictable" + +// Config fields support the 'access' tag with the following values corresponding to the suffix of the associated +// PERMISSION_SYSCONSOLE_*_* permission Id: 'about', 'reporting', 'user_management_users', +// 'user_management_groups', 'user_management_teams', 'user_management_channels', +// 'user_management_permissions', 'environment', 'site', 'authentication', 'plugins', +// 'integrations', 'compliance', 'plugins', and 'experimental'. They grant read and/or write access to the config field +// to roles without PERMISSION_MANAGE_SYSTEM. +// +// By default config values can be written with PERMISSION_MANAGE_SYSTEM, but if ExperimentalSettings.RestrictSystemAdmin is true +// and the access tag contains the value 'write_restrictable', then even PERMISSION_MANAGE_SYSTEM does not grant write access. +// +// PERMISSION_MANAGE_SYSTEM always grants read access. +// +// Config values with the access tag 'cloud_restrictable' mean that are marked to be filtered when it's used in a cloud licensed +// environment with ExperimentalSettings.RestrictedSystemAdmin set to true. +// +// Example: +// type HairSettings struct { +// // Colour is writeable with either PERMISSION_SYSCONSOLE_WRITE_REPORTING or PERMISSION_SYSCONSOLE_WRITE_USER_MANAGEMENT_GROUPS. +// // It is readable by PERMISSION_SYSCONSOLE_READ_REPORTING and PERMISSION_SYSCONSOLE_READ_USER_MANAGEMENT_GROUPS permissions. +// // PERMISSION_MANAGE_SYSTEM grants read and write access. +// Colour string `access:"reporting,user_management_groups"` +// +// +// // Length is only readable and writable via PERMISSION_MANAGE_SYSTEM. +// Length string +// +// // Product is only writeable by PERMISSION_MANAGE_SYSTEM if ExperimentalSettings.RestrictSystemAdmin is false. +// // PERMISSION_MANAGE_SYSTEM can always read the value. 
+// Product bool `access:write_restrictable` +// } type Config struct { - ServiceSettings ServiceSettings - TeamSettings TeamSettings - ClientRequirements ClientRequirements - SqlSettings SqlSettings - LogSettings LogSettings - NotificationLogSettings NotificationLogSettings - PasswordSettings PasswordSettings - FileSettings FileSettings - EmailSettings EmailSettings - RateLimitSettings RateLimitSettings - PrivacySettings PrivacySettings - SupportSettings SupportSettings - AnnouncementSettings AnnouncementSettings - ThemeSettings ThemeSettings - GitLabSettings SSOSettings - GoogleSettings SSOSettings - Office365Settings SSOSettings - LdapSettings LdapSettings - ComplianceSettings ComplianceSettings - LocalizationSettings LocalizationSettings - SamlSettings SamlSettings - NativeAppSettings NativeAppSettings - ClusterSettings ClusterSettings - MetricsSettings MetricsSettings - ExperimentalSettings ExperimentalSettings - AnalyticsSettings AnalyticsSettings - ElasticsearchSettings ElasticsearchSettings - DataRetentionSettings DataRetentionSettings - MessageExportSettings MessageExportSettings - JobSettings JobSettings - PluginSettings PluginSettings - DisplaySettings DisplaySettings - ImageProxySettings ImageProxySettings + ServiceSettings ServiceSettings + TeamSettings TeamSettings + ClientRequirements ClientRequirements + SqlSettings SqlSettings + LogSettings LogSettings + ExperimentalAuditSettings ExperimentalAuditSettings + NotificationLogSettings NotificationLogSettings + PasswordSettings PasswordSettings + FileSettings FileSettings + EmailSettings EmailSettings + RateLimitSettings RateLimitSettings + PrivacySettings PrivacySettings + SupportSettings SupportSettings + AnnouncementSettings AnnouncementSettings + ThemeSettings ThemeSettings + GitLabSettings SSOSettings + GoogleSettings SSOSettings + Office365Settings Office365Settings + OpenIdSettings SSOSettings + LdapSettings LdapSettings + ComplianceSettings ComplianceSettings + LocalizationSettings LocalizationSettings + SamlSettings SamlSettings + NativeAppSettings NativeAppSettings + ClusterSettings ClusterSettings + MetricsSettings MetricsSettings + ExperimentalSettings ExperimentalSettings + AnalyticsSettings AnalyticsSettings + ElasticsearchSettings ElasticsearchSettings + BleveSettings BleveSettings + DataRetentionSettings DataRetentionSettings + MessageExportSettings MessageExportSettings + JobSettings JobSettings + PluginSettings PluginSettings + DisplaySettings DisplaySettings + GuestAccountsSettings GuestAccountsSettings + ImageProxySettings ImageProxySettings + CloudSettings CloudSettings + FeatureFlags *FeatureFlags `json:",omitempty"` + ImportSettings ImportSettings } func (o *Config) Clone() *Config { @@ -2418,6 +3018,18 @@ func (o *Config) ToJson() string { return string(b) } +func (o *Config) ToJsonFiltered(tagType, tagValue string) string { + filteredConfigMap := structToMapFilteredByTag(*o, tagType, tagValue) + for key, value := range filteredConfigMap { + v, ok := value.(map[string]interface{}) + if ok && len(v) == 0 { + delete(filteredConfigMap, key) + } + } + b, _ := json.Marshal(filteredConfigMap) + return string(b) +} + func (o *Config) GetSSOService(service string) *SSOSettings { switch service { case SERVICE_GITLAB: @@ -2425,7 +3037,9 @@ func (o *Config) GetSSOService(service string) *SSOSettings { case SERVICE_GOOGLE: return &o.GoogleSettings case SERVICE_OFFICE365: - return &o.Office365Settings + return o.Office365Settings.SSOSettings() + case SERVICE_OPENID: + return &o.OpenIdSettings } return nil @@ -2460,9 
+3074,11 @@ func (o *Config) SetDefaults() { o.FileSettings.SetDefaults(isUpdate) o.EmailSettings.SetDefaults(isUpdate) o.PrivacySettings.setDefaults() - o.Office365Settings.setDefaults(OFFICE365_SETTINGS_DEFAULT_SCOPE, OFFICE365_SETTINGS_DEFAULT_AUTH_ENDPOINT, OFFICE365_SETTINGS_DEFAULT_TOKEN_ENDPOINT, OFFICE365_SETTINGS_DEFAULT_USER_API_ENDPOINT) - o.GitLabSettings.setDefaults("", "", "", "") - o.GoogleSettings.setDefaults(GOOGLE_SETTINGS_DEFAULT_SCOPE, GOOGLE_SETTINGS_DEFAULT_AUTH_ENDPOINT, GOOGLE_SETTINGS_DEFAULT_TOKEN_ENDPOINT, GOOGLE_SETTINGS_DEFAULT_USER_API_ENDPOINT) + o.Office365Settings.setDefaults() + o.Office365Settings.setDefaults() + o.GitLabSettings.setDefaults("", "", "", "", "") + o.GoogleSettings.setDefaults(GOOGLE_SETTINGS_DEFAULT_SCOPE, GOOGLE_SETTINGS_DEFAULT_AUTH_ENDPOINT, GOOGLE_SETTINGS_DEFAULT_TOKEN_ENDPOINT, GOOGLE_SETTINGS_DEFAULT_USER_API_ENDPOINT, "") + o.OpenIdSettings.setDefaults(OPENID_SETTINGS_DEFAULT_SCOPE, "", "", "", "#145DBF") o.ServiceSettings.SetDefaults(isUpdate) o.PasswordSettings.SetDefaults() o.TeamSettings.SetDefaults() @@ -2477,15 +3093,24 @@ func (o *Config) SetDefaults() { o.ComplianceSettings.SetDefaults() o.LocalizationSettings.SetDefaults() o.ElasticsearchSettings.SetDefaults() + o.BleveSettings.SetDefaults() o.NativeAppSettings.SetDefaults() o.DataRetentionSettings.SetDefaults() o.RateLimitSettings.SetDefaults() o.LogSettings.SetDefaults() + o.ExperimentalAuditSettings.SetDefaults() o.NotificationLogSettings.SetDefaults() o.JobSettings.SetDefaults() o.MessageExportSettings.SetDefaults() o.DisplaySettings.SetDefaults() + o.GuestAccountsSettings.SetDefaults() o.ImageProxySettings.SetDefaults(o.ServiceSettings) + o.CloudSettings.SetDefaults() + if o.FeatureFlags == nil { + o.FeatureFlags = &FeatureFlags{} + o.FeatureFlags.SetDefaults() + } + o.ImportSettings.SetDefaults() } func (o *Config) IsValid() *AppError { @@ -2541,6 +3166,10 @@ func (o *Config) IsValid() *AppError { return err } + if err := o.BleveSettings.isValid(); err != nil { + return err + } + if err := o.DataRetentionSettings.isValid(); err != nil { return err } @@ -2561,279 +3190,323 @@ func (o *Config) IsValid() *AppError { return err } + if err := o.ImportSettings.isValid(); err != nil { + return err + } return nil } -func (ts *TeamSettings) isValid() *AppError { - if *ts.MaxUsersPerTeam <= 0 { +func (s *TeamSettings) isValid() *AppError { + if *s.MaxUsersPerTeam <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "", http.StatusBadRequest) } - if *ts.MaxChannelsPerTeam <= 0 { + if *s.MaxChannelsPerTeam <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "", http.StatusBadRequest) } - if *ts.MaxNotificationsPerChannel <= 0 { + if *s.MaxNotificationsPerChannel <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "", http.StatusBadRequest) } - if !(*ts.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *ts.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) { + if !(*s.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *s.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) { return NewAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "", http.StatusBadRequest) } - if !(*ts.TeammateNameDisplay == SHOW_FULLNAME || *ts.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || *ts.TeammateNameDisplay == SHOW_USERNAME) { + if !(*s.TeammateNameDisplay == SHOW_FULLNAME || *s.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || 
*s.TeammateNameDisplay == SHOW_USERNAME) { return NewAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "", http.StatusBadRequest) } - if len(*ts.SiteName) == 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.sitename_empty.app_error", nil, "", http.StatusBadRequest) - } - - if len(*ts.SiteName) > SITENAME_MAX_LENGTH { + if len(*s.SiteName) > SITENAME_MAX_LENGTH { return NewAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SITENAME_MAX_LENGTH}, "", http.StatusBadRequest) } return nil } -func (ss *SqlSettings) isValid() *AppError { - if *ss.AtRestEncryptKey != "" && len(*ss.AtRestEncryptKey) < 32 { +func (s *SqlSettings) isValid() *AppError { + if *s.AtRestEncryptKey != "" && len(*s.AtRestEncryptKey) < 32 { return NewAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "", http.StatusBadRequest) } - if !(*ss.DriverName == DATABASE_DRIVER_MYSQL || *ss.DriverName == DATABASE_DRIVER_POSTGRES) { + if !(*s.DriverName == DATABASE_DRIVER_MYSQL || *s.DriverName == DATABASE_DRIVER_POSTGRES) { return NewAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "", http.StatusBadRequest) } - if *ss.MaxIdleConns <= 0 { + if *s.MaxIdleConns <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "", http.StatusBadRequest) } - if *ss.ConnMaxLifetimeMilliseconds < 0 { + if *s.ConnMaxLifetimeMilliseconds < 0 { return NewAppError("Config.IsValid", "model.config.is_valid.sql_conn_max_lifetime_milliseconds.app_error", nil, "", http.StatusBadRequest) } - if *ss.QueryTimeout <= 0 { + if *s.QueryTimeout <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.DataSource) == 0 { + if len(*s.DataSource) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "", http.StatusBadRequest) } - if *ss.MaxOpenConns <= 0 { + if *s.MaxOpenConns <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "", http.StatusBadRequest) } return nil } -func (fs *FileSettings) isValid() *AppError { - if *fs.MaxFileSize <= 0 { +func (s *FileSettings) isValid() *AppError { + if *s.MaxFileSize <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "", http.StatusBadRequest) } - if !(*fs.DriverName == IMAGE_DRIVER_LOCAL || *fs.DriverName == IMAGE_DRIVER_S3) { + if !(*s.DriverName == IMAGE_DRIVER_LOCAL || *s.DriverName == IMAGE_DRIVER_S3) { return NewAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "", http.StatusBadRequest) } - if *fs.PublicLinkSalt != "" && len(*fs.PublicLinkSalt) < 32 { + if *s.PublicLinkSalt != "" && len(*s.PublicLinkSalt) < 32 { return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest) } + if *s.Directory == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.directory.app_error", nil, "", http.StatusBadRequest) + } + return nil } -func (es *EmailSettings) isValid() *AppError { - if !(*es.ConnectionSecurity == CONN_SECURITY_NONE || *es.ConnectionSecurity == CONN_SECURITY_TLS || *es.ConnectionSecurity == CONN_SECURITY_STARTTLS || *es.ConnectionSecurity == CONN_SECURITY_PLAIN) { +func (s *EmailSettings) isValid() *AppError { + if !(*s.ConnectionSecurity == CONN_SECURITY_NONE || *s.ConnectionSecurity == 
CONN_SECURITY_TLS || *s.ConnectionSecurity == CONN_SECURITY_STARTTLS || *s.ConnectionSecurity == CONN_SECURITY_PLAIN) { return NewAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "", http.StatusBadRequest) } - if *es.EmailBatchingBufferSize <= 0 { + if *s.EmailBatchingBufferSize <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "", http.StatusBadRequest) } - if *es.EmailBatchingInterval < 30 { + if *s.EmailBatchingInterval < 30 { return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "", http.StatusBadRequest) } - if !(*es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) { + if !(*s.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *s.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) { return NewAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "", http.StatusBadRequest) } return nil } -func (rls *RateLimitSettings) isValid() *AppError { - if *rls.MemoryStoreSize <= 0 { +func (s *RateLimitSettings) isValid() *AppError { + if *s.MemoryStoreSize <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "", http.StatusBadRequest) } - if *rls.PerSec <= 0 { + if *s.PerSec <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "", http.StatusBadRequest) } - if *rls.MaxBurst <= 0 { + if *s.MaxBurst <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "", http.StatusBadRequest) } return nil } -func (ls *LdapSettings) isValid() *AppError { - if !(*ls.ConnectionSecurity == CONN_SECURITY_NONE || *ls.ConnectionSecurity == CONN_SECURITY_TLS || *ls.ConnectionSecurity == CONN_SECURITY_STARTTLS) { +func (s *LdapSettings) isValid() *AppError { + if !(*s.ConnectionSecurity == CONN_SECURITY_NONE || *s.ConnectionSecurity == CONN_SECURITY_TLS || *s.ConnectionSecurity == CONN_SECURITY_STARTTLS) { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "", http.StatusBadRequest) } - if *ls.SyncIntervalMinutes <= 0 { + if *s.SyncIntervalMinutes <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "", http.StatusBadRequest) } - if *ls.MaxPageSize < 0 { + if *s.MaxPageSize < 0 { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "", http.StatusBadRequest) } - if *ls.Enable { - if *ls.LdapServer == "" { + if *s.Enable { + if *s.LdapServer == "" { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "", http.StatusBadRequest) } - if *ls.BaseDN == "" { + if *s.BaseDN == "" { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "", http.StatusBadRequest) } - if *ls.EmailAttribute == "" { + if *s.EmailAttribute == "" { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "", http.StatusBadRequest) } - if *ls.UsernameAttribute == "" { + if *s.UsernameAttribute == "" { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "", http.StatusBadRequest) } - if *ls.IdAttribute == "" { + if *s.IdAttribute == "" { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "", http.StatusBadRequest) } - if *ls.LoginIdAttribute == "" { + if 
*s.LoginIdAttribute == "" { return NewAppError("Config.IsValid", "model.config.is_valid.ldap_login_id", nil, "", http.StatusBadRequest) } - if *ls.UserFilter != "" { - if _, err := ldap.CompileFilter(*ls.UserFilter); err != nil { + if *s.UserFilter != "" { + if _, err := ldap.CompileFilter(*s.UserFilter); err != nil { return NewAppError("ValidateFilter", "ent.ldap.validate_filter.app_error", nil, err.Error(), http.StatusBadRequest) } } + + if *s.GuestFilter != "" { + if _, err := ldap.CompileFilter(*s.GuestFilter); err != nil { + return NewAppError("LdapSettings.isValid", "ent.ldap.validate_guest_filter.app_error", nil, err.Error(), http.StatusBadRequest) + } + } + + if *s.AdminFilter != "" { + if _, err := ldap.CompileFilter(*s.AdminFilter); err != nil { + return NewAppError("LdapSettings.isValid", "ent.ldap.validate_admin_filter.app_error", nil, err.Error(), http.StatusBadRequest) + } + } } return nil } -func (ss *SamlSettings) isValid() *AppError { - if *ss.Enable { - if len(*ss.IdpUrl) == 0 || !IsValidHttpUrl(*ss.IdpUrl) { +func (s *SamlSettings) isValid() *AppError { + if *s.Enable { + if len(*s.IdpUrl) == 0 || !IsValidHttpUrl(*s.IdpUrl) { return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*ss.IdpDescriptorUrl) { + if len(*s.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*s.IdpDescriptorUrl) { return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.IdpCertificateFile) == 0 { + if len(*s.IdpCertificateFile) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.EmailAttribute) == 0 { + if len(*s.EmailAttribute) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.UsernameAttribute) == 0 { + if len(*s.UsernameAttribute) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "", http.StatusBadRequest) } - if *ss.Verify { - if len(*ss.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*ss.AssertionConsumerServiceURL) { + if len(*s.ServiceProviderIdentifier) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_spidentifier_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if *s.Verify { + if len(*s.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*s.AssertionConsumerServiceURL) { return NewAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "", http.StatusBadRequest) } } - if *ss.Encrypt { - if len(*ss.PrivateKeyFile) == 0 { + if *s.Encrypt { + if len(*s.PrivateKeyFile) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.PublicCertificateFile) == 0 { + if len(*s.PublicCertificateFile) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "", http.StatusBadRequest) } } - if len(*ss.EmailAttribute) == 0 { + if len(*s.EmailAttribute) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) } + + if !(*s.SignatureAlgorithm == SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA1 || *s.SignatureAlgorithm == SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA256 || *s.SignatureAlgorithm == 
SAML_SETTINGS_SIGNATURE_ALGORITHM_SHA512) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_signature_algorithm.app_error", nil, "", http.StatusBadRequest) + } + if !(*s.CanonicalAlgorithm == SAML_SETTINGS_CANONICAL_ALGORITHM_C14N || *s.CanonicalAlgorithm == SAML_SETTINGS_CANONICAL_ALGORITHM_C14N11) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_canonical_algorithm.app_error", nil, "", http.StatusBadRequest) + } + + if len(*s.GuestAttribute) > 0 { + if !(strings.Contains(*s.GuestAttribute, "=")) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_guest_attribute.app_error", nil, "", http.StatusBadRequest) + } + if len(strings.Split(*s.GuestAttribute, "=")) != 2 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_guest_attribute.app_error", nil, "", http.StatusBadRequest) + } + } + + if len(*s.AdminAttribute) > 0 { + if !(strings.Contains(*s.AdminAttribute, "=")) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_admin_attribute.app_error", nil, "", http.StatusBadRequest) + } + if len(strings.Split(*s.AdminAttribute, "=")) != 2 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_admin_attribute.app_error", nil, "", http.StatusBadRequest) + } + } } return nil } -func (ss *ServiceSettings) isValid() *AppError { - if !(*ss.ConnectionSecurity == CONN_SECURITY_NONE || *ss.ConnectionSecurity == CONN_SECURITY_TLS) { +func (s *ServiceSettings) isValid() *AppError { + if !(*s.ConnectionSecurity == CONN_SECURITY_NONE || *s.ConnectionSecurity == CONN_SECURITY_TLS) { return NewAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "", http.StatusBadRequest) } - if *ss.ConnectionSecurity == CONN_SECURITY_TLS && *ss.UseLetsEncrypt == false { + if *s.ConnectionSecurity == CONN_SECURITY_TLS && !*s.UseLetsEncrypt { appErr := NewAppError("Config.IsValid", "model.config.is_valid.tls_cert_file.app_error", nil, "", http.StatusBadRequest) - if *ss.TLSCertFile == "" { + if *s.TLSCertFile == "" { return appErr - } else if _, err := os.Stat(*ss.TLSCertFile); os.IsNotExist(err) { + } else if _, err := os.Stat(*s.TLSCertFile); os.IsNotExist(err) { return appErr } appErr = NewAppError("Config.IsValid", "model.config.is_valid.tls_key_file.app_error", nil, "", http.StatusBadRequest) - if *ss.TLSKeyFile == "" { + if *s.TLSKeyFile == "" { return appErr - } else if _, err := os.Stat(*ss.TLSKeyFile); os.IsNotExist(err) { + } else if _, err := os.Stat(*s.TLSKeyFile); os.IsNotExist(err) { return appErr } } - if len(ss.TLSOverwriteCiphers) > 0 { - for _, cipher := range ss.TLSOverwriteCiphers { + if len(s.TLSOverwriteCiphers) > 0 { + for _, cipher := range s.TLSOverwriteCiphers { if _, ok := ServerTLSSupportedCiphers[cipher]; !ok { return NewAppError("Config.IsValid", "model.config.is_valid.tls_overwrite_cipher.app_error", map[string]interface{}{"name": cipher}, "", http.StatusBadRequest) } } } - if *ss.ReadTimeout <= 0 { + if *s.ReadTimeout <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "", http.StatusBadRequest) } - if *ss.WriteTimeout <= 0 { + if *s.WriteTimeout <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.write_timeout.app_error", nil, "", http.StatusBadRequest) } - if *ss.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { + if *s.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { return NewAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "", http.StatusBadRequest) } - if 
*ss.MaximumLoginAttempts <= 0 { + if *s.MaximumLoginAttempts <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "", http.StatusBadRequest) } - if len(*ss.SiteURL) != 0 { - if _, err := url.ParseRequestURI(*ss.SiteURL); err != nil { + if *s.SiteURL != "" { + if _, err := url.ParseRequestURI(*s.SiteURL); err != nil { return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest) } } - if len(*ss.WebsocketURL) != 0 { - if _, err := url.ParseRequestURI(*ss.WebsocketURL); err != nil { + if *s.WebsocketURL != "" { + if _, err := url.ParseRequestURI(*s.WebsocketURL); err != nil { return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, "", http.StatusBadRequest) } } - host, port, _ := net.SplitHostPort(*ss.ListenAddress) + host, port, _ := net.SplitHostPort(*s.ListenAddress) var isValidHost bool if host == "" { isValidHost = true @@ -2845,72 +3518,98 @@ func (ss *ServiceSettings) isValid() *AppError { return NewAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "", http.StatusBadRequest) } - if *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DISABLED && - *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_ON && - *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_OFF { + if *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DISABLED && + *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_ON && + *s.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_OFF { return NewAppError("Config.IsValid", "model.config.is_valid.group_unread_channels.app_error", nil, "", http.StatusBadRequest) } + if *s.CollapsedThreads != COLLAPSED_THREADS_DISABLED && + *s.CollapsedThreads != COLLAPSED_THREADS_DEFAULT_ON && + *s.CollapsedThreads != COLLAPSED_THREADS_DEFAULT_OFF { + return NewAppError("Config.IsValid", "model.config.is_valid.collapsed_threads.app_error", nil, "", http.StatusBadRequest) + } + return nil } -func (ess *ElasticsearchSettings) isValid() *AppError { - if *ess.EnableIndexing { - if len(*ess.ConnectionUrl) == 0 { +func (s *ElasticsearchSettings) isValid() *AppError { + if *s.EnableIndexing { + if len(*s.ConnectionUrl) == 0 { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "", http.StatusBadRequest) } } - if *ess.EnableSearching && !*ess.EnableIndexing { + if *s.EnableSearching && !*s.EnableIndexing { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "", http.StatusBadRequest) } - if *ess.EnableAutocomplete && !*ess.EnableIndexing { + if *s.EnableAutocomplete && !*s.EnableIndexing { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest) } - if *ess.AggregatePostsAfterDays < 1 { + if *s.AggregatePostsAfterDays < 1 { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error", nil, "", http.StatusBadRequest) } - if _, err := time.Parse("15:04", *ess.PostsAggregatorJobStartTime); err != nil { + if _, err := time.Parse("15:04", *s.PostsAggregatorJobStartTime); err != nil { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) } - if *ess.LiveIndexingBatchSize < 1 { + if *s.LiveIndexingBatchSize < 1 { return 
NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest) } - if *ess.BulkIndexingTimeWindowSeconds < 1 { + if *s.BulkIndexingTimeWindowSeconds < 1 { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest) } - if *ess.RequestTimeoutSeconds < 1 { + if *s.RequestTimeoutSeconds < 1 { return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.request_timeout_seconds.app_error", nil, "", http.StatusBadRequest) } return nil } -func (drs *DataRetentionSettings) isValid() *AppError { - if *drs.MessageRetentionDays <= 0 { +func (bs *BleveSettings) isValid() *AppError { + if *bs.EnableIndexing { + if len(*bs.IndexDir) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.filename.app_error", nil, "", http.StatusBadRequest) + } + } else { + if *bs.EnableSearching { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_searching.app_error", nil, "", http.StatusBadRequest) + } + if *bs.EnableAutocomplete { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest) + } + } + if *bs.BulkIndexingTimeWindowSeconds < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (s *DataRetentionSettings) isValid() *AppError { + if *s.MessageRetentionDays <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.message_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) } - if *drs.FileRetentionDays <= 0 { + if *s.FileRetentionDays <= 0 { return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.file_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) } - if _, err := time.Parse("15:04", *drs.DeletionJobStartTime); err != nil { + if _, err := time.Parse("15:04", *s.DeletionJobStartTime); err != nil { return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.deletion_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) } return nil } -func (ls *LocalizationSettings) isValid() *AppError { - if len(*ls.AvailableLocales) > 0 { - if !strings.Contains(*ls.AvailableLocales, *ls.DefaultClientLocale) { +func (s *LocalizationSettings) isValid() *AppError { + if len(*s.AvailableLocales) > 0 { + if !strings.Contains(*s.AvailableLocales, *s.DefaultClientLocale) { return NewAppError("Config.IsValid", "model.config.is_valid.localization.available_locales.app_error", nil, "", http.StatusBadRequest) } } @@ -2918,35 +3617,35 @@ func (ls *LocalizationSettings) isValid() *AppError { return nil } -func (mes *MessageExportSettings) isValid(fs FileSettings) *AppError { - if mes.EnableExport == nil { +func (s *MessageExportSettings) isValid(fs FileSettings) *AppError { + if s.EnableExport == nil { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.enable.app_error", nil, "", http.StatusBadRequest) } - if *mes.EnableExport { - if mes.ExportFromTimestamp == nil || *mes.ExportFromTimestamp < 0 || *mes.ExportFromTimestamp > GetMillis() { + if *s.EnableExport { + if s.ExportFromTimestamp == nil || *s.ExportFromTimestamp < 0 || *s.ExportFromTimestamp > GetMillis() { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_from.app_error", nil, 
"", http.StatusBadRequest) - } else if mes.DailyRunTime == nil { + } else if s.DailyRunTime == nil { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, "", http.StatusBadRequest) - } else if _, err := time.Parse("15:04", *mes.DailyRunTime); err != nil { + } else if _, err := time.Parse("15:04", *s.DailyRunTime); err != nil { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, err.Error(), http.StatusBadRequest) - } else if mes.BatchSize == nil || *mes.BatchSize < 0 { + } else if s.BatchSize == nil || *s.BatchSize < 0 { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.batch_size.app_error", nil, "", http.StatusBadRequest) - } else if mes.ExportFormat == nil || (*mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_ACTIANCE && *mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_GLOBALRELAY && *mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_CSV) { + } else if s.ExportFormat == nil || (*s.ExportFormat != COMPLIANCE_EXPORT_TYPE_ACTIANCE && *s.ExportFormat != COMPLIANCE_EXPORT_TYPE_GLOBALRELAY && *s.ExportFormat != COMPLIANCE_EXPORT_TYPE_CSV) { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_type.app_error", nil, "", http.StatusBadRequest) } - if *mes.ExportFormat == COMPLIANCE_EXPORT_TYPE_GLOBALRELAY { - if mes.GlobalRelaySettings == nil { + if *s.ExportFormat == COMPLIANCE_EXPORT_TYPE_GLOBALRELAY { + if s.GlobalRelaySettings == nil { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.config_missing.app_error", nil, "", http.StatusBadRequest) - } else if mes.GlobalRelaySettings.CustomerType == nil || (*mes.GlobalRelaySettings.CustomerType != GLOBALRELAY_CUSTOMER_TYPE_A9 && *mes.GlobalRelaySettings.CustomerType != GLOBALRELAY_CUSTOMER_TYPE_A10) { + } else if s.GlobalRelaySettings.CustomerType == nil || (*s.GlobalRelaySettings.CustomerType != GLOBALRELAY_CUSTOMER_TYPE_A9 && *s.GlobalRelaySettings.CustomerType != GLOBALRELAY_CUSTOMER_TYPE_A10) { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.customer_type.app_error", nil, "", http.StatusBadRequest) - } else if mes.GlobalRelaySettings.EmailAddress == nil || !strings.Contains(*mes.GlobalRelaySettings.EmailAddress, "@") { + } else if s.GlobalRelaySettings.EmailAddress == nil || !strings.Contains(*s.GlobalRelaySettings.EmailAddress, "@") { // validating email addresses is hard - just make sure it contains an '@' sign // see https://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.email_address.app_error", nil, "", http.StatusBadRequest) - } else if mes.GlobalRelaySettings.SmtpUsername == nil || *mes.GlobalRelaySettings.SmtpUsername == "" { + } else if s.GlobalRelaySettings.SmtpUsername == nil || *s.GlobalRelaySettings.SmtpUsername == "" { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.smtp_username.app_error", nil, "", http.StatusBadRequest) - } else if mes.GlobalRelaySettings.SmtpPassword == nil || *mes.GlobalRelaySettings.SmtpPassword == "" { + } else if s.GlobalRelaySettings.SmtpPassword == nil || *s.GlobalRelaySettings.SmtpPassword == "" { return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay.smtp_password.app_error", nil, "", http.StatusBadRequest) } } @@ -2954,11 +3653,11 @@ func (mes *MessageExportSettings) isValid(fs 
FileSettings) *AppError { return nil } -func (ds *DisplaySettings) isValid() *AppError { - if len(ds.CustomUrlSchemes) != 0 { +func (s *DisplaySettings) isValid() *AppError { + if len(s.CustomUrlSchemes) != 0 { validProtocolPattern := regexp.MustCompile(`(?i)^\s*[A-Za-z][A-Za-z0-9.+-]*\s*$`) - for _, scheme := range ds.CustomUrlSchemes { + for _, scheme := range s.CustomUrlSchemes { if !validProtocolPattern.MatchString(scheme) { return NewAppError( "Config.IsValid", @@ -2974,17 +3673,17 @@ func (ds *DisplaySettings) isValid() *AppError { return nil } -func (ips *ImageProxySettings) isValid() *AppError { - if *ips.Enable { - switch *ips.ImageProxyType { +func (s *ImageProxySettings) isValid() *AppError { + if *s.Enable { + switch *s.ImageProxyType { case IMAGE_PROXY_TYPE_LOCAL: // No other settings to validate case IMAGE_PROXY_TYPE_ATMOS_CAMO: - if *ips.RemoteImageProxyURL == "" { + if *s.RemoteImageProxyURL == "" { return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_url.app_error", nil, "", http.StatusBadRequest) } - if *ips.RemoteImageProxyOptions == "" { + if *s.RemoteImageProxyOptions == "" { return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_options.app_error", nil, "", http.StatusBadRequest) } default: @@ -3022,6 +3721,18 @@ func (o *Config) Sanitize() { *o.GitLabSettings.Secret = FAKE_SETTING } + if o.GoogleSettings.Secret != nil && len(*o.GoogleSettings.Secret) > 0 { + *o.GoogleSettings.Secret = FAKE_SETTING + } + + if o.Office365Settings.Secret != nil && len(*o.Office365Settings.Secret) > 0 { + *o.Office365Settings.Secret = FAKE_SETTING + } + + if o.OpenIdSettings.Secret != nil && len(*o.OpenIdSettings.Secret) > 0 { + *o.OpenIdSettings.Secret = FAKE_SETTING + } + *o.SqlSettings.DataSource = FAKE_SETTING *o.SqlSettings.AtRestEncryptKey = FAKE_SETTING @@ -3034,4 +3745,74 @@ func (o *Config) Sanitize() { for i := range o.SqlSettings.DataSourceSearchReplicas { o.SqlSettings.DataSourceSearchReplicas[i] = FAKE_SETTING } + + if o.MessageExportSettings.GlobalRelaySettings.SmtpPassword != nil && len(*o.MessageExportSettings.GlobalRelaySettings.SmtpPassword) > 0 { + *o.MessageExportSettings.GlobalRelaySettings.SmtpPassword = FAKE_SETTING + } + + if o.ServiceSettings.GfycatApiSecret != nil && len(*o.ServiceSettings.GfycatApiSecret) > 0 { + *o.ServiceSettings.GfycatApiSecret = FAKE_SETTING + } + + *o.ServiceSettings.SplitKey = FAKE_SETTING +} + +// structToMapFilteredByTag converts a struct into a map removing those fields that has the tag passed +// as argument +func structToMapFilteredByTag(t interface{}, typeOfTag, filterTag string) map[string]interface{} { + defer func() { + if r := recover(); r != nil { + mlog.Warn("Panicked in structToMapFilteredByTag. 
This should never happen.", mlog.Any("recover", r)) + } + }() + + val := reflect.ValueOf(t) + elemField := reflect.TypeOf(t) + + if val.Kind() != reflect.Struct { + return nil + } + + out := map[string]interface{}{} + + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + + structField := elemField.Field(i) + tagPermissions := strings.Split(structField.Tag.Get(typeOfTag), ",") + if isTagPresent(filterTag, tagPermissions) { + continue + } + + var value interface{} + + switch field.Kind() { + case reflect.Struct: + value = structToMapFilteredByTag(field.Interface(), typeOfTag, filterTag) + case reflect.Ptr: + indirectType := field.Elem() + if indirectType.Kind() == reflect.Struct { + value = structToMapFilteredByTag(indirectType.Interface(), typeOfTag, filterTag) + } else if indirectType.Kind() != reflect.Invalid { + value = indirectType.Interface() + } + default: + value = field.Interface() + } + + out[val.Type().Field(i).Name] = value + } + + return out +} + +func isTagPresent(tag string, tags []string) bool { + for _, val := range tags { + tagValue := strings.TrimSpace(val) + if tagValue != "" && tagValue == tag { + return true + } + } + + return false } diff --git a/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go b/vendor/github.com/mattermost/mattermost-server/v5/model/data_retention_policy.go similarity index 62% rename from vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/data_retention_policy.go index dbb1337..42fb969 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/data_retention_policy.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -15,13 +15,13 @@ type DataRetentionPolicy struct { FileRetentionCutoff int64 `json:"file_retention_cutoff"` } -func (me *DataRetentionPolicy) ToJson() string { - b, _ := json.Marshal(me) +func (drp *DataRetentionPolicy) ToJson() string { + b, _ := json.Marshal(drp) return string(b) } func DataRetentionPolicyFromJson(data io.Reader) *DataRetentionPolicy { - var me *DataRetentionPolicy - json.NewDecoder(data).Decode(&me) - return me + var drp *DataRetentionPolicy + json.NewDecoder(data).Decode(&drp) + return drp } diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji.go b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/model/emoji.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/emoji.go index c2ef25a..aeee9b3 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/emoji.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
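For reference, the tag-based filtering used by structToMapFilteredByTag and ToJsonFiltered above can be sketched in isolation. The snippet below is an illustrative stand-in, not part of the server code: demoSettings and filterByTag are invented names, and only the general pattern (walk a struct with reflection, skip fields whose `access` tag contains a given value) mirrors the patched code.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// demoSettings is a toy config struct; field tags follow the `access` convention documented above.
type demoSettings struct {
	SiteName   string `access:"site"`
	LicenseKey string `access:"write_restrictable,cloud_restrictable"`
	Port       int    // untagged fields are kept
}

// filterByTag returns field name -> value, skipping fields whose tag named
// tagType contains filterValue among its comma-separated values.
func filterByTag(v interface{}, tagType, filterValue string) map[string]interface{} {
	out := map[string]interface{}{}
	val := reflect.ValueOf(v)
	typ := reflect.TypeOf(v)
	if val.Kind() != reflect.Struct {
		return out
	}
	for i := 0; i < val.NumField(); i++ {
		tags := strings.Split(typ.Field(i).Tag.Get(tagType), ",")
		skip := false
		for _, t := range tags {
			if strings.TrimSpace(t) == filterValue {
				skip = true
				break
			}
		}
		if skip {
			continue
		}
		out[typ.Field(i).Name] = val.Field(i).Interface()
	}
	return out
}

func main() {
	s := demoSettings{SiteName: "Example", LicenseKey: "secret", Port: 8065}
	filtered := filterByTag(s, "access", "write_restrictable")
	b, _ := json.Marshal(filtered)
	fmt.Println(string(b)) // {"Port":8065,"SiteName":"Example"}
}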
package model @@ -37,7 +37,7 @@ func GetSystemEmojiId(emojiName string) (string, bool) { } func (emoji *Emoji) IsValid() *AppError { - if len(emoji.Id) != 26 { + if !IsValidId(emoji.Id) { return NewAppError("Emoji.IsValid", "model.emoji.id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji_data.go b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_data.go similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/model/emoji_data.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/emoji_data.go index f6e62e6..807f6ab 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/emoji_data.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_data.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_search.go similarity index 77% rename from vendor/github.com/mattermost/mattermost-server/model/emoji_search.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/emoji_search.go index 3a768a5..71e2671 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/emoji_search.go @@ -1,5 +1,5 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/feature_flags.go b/vendor/github.com/mattermost/mattermost-server/v5/model/feature_flags.go new file mode 100644 index 0000000..0c8d670 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/feature_flags.go @@ -0,0 +1,51 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import "reflect" + +type FeatureFlags struct { + // Exists only for unit and manual testing. + // When set to a value, will be returned by the ping endpoint. + TestFeature string + // Exists only for testing bool functionality. Boolean feature flags interpret "on" or "true" as true and + // all other values as false. 
+ TestBoolFeature bool + + // Toggle on and off scheduled jobs for cloud user limit emails see MM-29999 + CloudDelinquentEmailJobsEnabled bool + + // Toggle on and off support for Collapsed Threads + CollapsedThreads bool + // Feature flags to control plugin versions + PluginIncidentManagement string `plugin_id:"com.mattermost.plugin-incident-management"` +} + +func (f *FeatureFlags) SetDefaults() { + f.TestFeature = "off" + f.TestBoolFeature = false + f.CloudDelinquentEmailJobsEnabled = false + f.CollapsedThreads = false + f.PluginIncidentManagement = "1.3.2" +} + +func (f *FeatureFlags) Plugins() map[string]string { + rFFVal := reflect.ValueOf(f).Elem() + rFFType := reflect.TypeOf(f).Elem() + + pluginVersions := make(map[string]string) + for i := 0; i < rFFVal.NumField(); i++ { + rFieldVal := rFFVal.Field(i) + rFieldType := rFFType.Field(i) + + pluginId, hasPluginId := rFieldType.Tag.Lookup("plugin_id") + if !hasPluginId { + continue + } + + pluginVersions[pluginId] = rFieldVal.String() + } + + return pluginVersions +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/file.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/model/file.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/file.go index e3ee032..9f76bac 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/file.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/file.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -9,7 +9,7 @@ import ( ) const ( - MaxImageSize = 6048 * 4032 // 24 megapixels, roughly 36MB as a raw image + MaxImageSize = int64(6048 * 4032) // 24 megapixels, roughly 36MB as a raw image ) var ( diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go new file mode 100644 index 0000000..0c34731 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go @@ -0,0 +1,228 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "image" + "image/gif" + "image/jpeg" + "io" + "mime" + "net/http" + "path/filepath" + "strings" + + "github.com/disintegration/imaging" + + "github.com/mattermost/mattermost-server/v5/mlog" +) + +const ( + FILEINFO_SORT_BY_CREATED = "CreateAt" + FILEINFO_SORT_BY_SIZE = "Size" +) + +// GetFileInfosOptions contains options for getting FileInfos +type GetFileInfosOptions struct { + // UserIds optionally limits the FileInfos to those created by the given users. + UserIds []string `json:"user_ids"` + // ChannelIds optionally limits the FileInfos to those created in the given channels. + ChannelIds []string `json:"channel_ids"` + // Since optionally limits FileInfos to those created at or after the given time, specified as Unix time in milliseconds. + Since int64 `json:"since"` + // IncludeDeleted if set includes deleted FileInfos. + IncludeDeleted bool `json:"include_deleted"` + // SortBy sorts the FileInfos by this field. The default is to sort by date created. + SortBy string `json:"sort_by"` + // SortDescending changes the sort direction to descending order when true. 
+ SortDescending bool `json:"sort_descending"` +} + +type FileInfo struct { + Id string `json:"id"` + CreatorId string `json:"user_id"` + PostId string `json:"post_id,omitempty"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + Path string `json:"-"` // not sent back to the client + ThumbnailPath string `json:"-"` // not sent back to the client + PreviewPath string `json:"-"` // not sent back to the client + Name string `json:"name"` + Extension string `json:"extension"` + Size int64 `json:"size"` + MimeType string `json:"mime_type"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` + HasPreviewImage bool `json:"has_preview_image,omitempty"` + MiniPreview *[]byte `json:"mini_preview"` // declared as *[]byte to avoid postgres/mysql differences in deserialization + Content string `json:"-"` +} + +func (fi *FileInfo) ToJson() string { + b, _ := json.Marshal(fi) + return string(b) +} + +func FileInfoFromJson(data io.Reader) *FileInfo { + decoder := json.NewDecoder(data) + + var fi FileInfo + if err := decoder.Decode(&fi); err != nil { + return nil + } + return &fi +} + +func FileInfosToJson(infos []*FileInfo) string { + b, _ := json.Marshal(infos) + return string(b) +} + +func FileInfosFromJson(data io.Reader) []*FileInfo { + decoder := json.NewDecoder(data) + + var infos []*FileInfo + if err := decoder.Decode(&infos); err != nil { + return nil + } + return infos +} + +func (fi *FileInfo) PreSave() { + if fi.Id == "" { + fi.Id = NewId() + } + + if fi.CreateAt == 0 { + fi.CreateAt = GetMillis() + } + + if fi.UpdateAt < fi.CreateAt { + fi.UpdateAt = fi.CreateAt + } +} + +func (fi *FileInfo) IsValid() *AppError { + if !IsValidId(fi.Id) { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if !IsValidId(fi.CreatorId) && fi.CreatorId != "nouser" { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+fi.Id, http.StatusBadRequest) + } + + if fi.PostId != "" && !IsValidId(fi.PostId) { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.post_id.app_error", nil, "id="+fi.Id, http.StatusBadRequest) + } + + if fi.CreateAt == 0 { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.create_at.app_error", nil, "id="+fi.Id, http.StatusBadRequest) + } + + if fi.UpdateAt == 0 { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.update_at.app_error", nil, "id="+fi.Id, http.StatusBadRequest) + } + + if fi.Path == "" { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.path.app_error", nil, "id="+fi.Id, http.StatusBadRequest) + } + + return nil +} + +func (fi *FileInfo) IsImage() bool { + return strings.HasPrefix(fi.MimeType, "image") +} + +func NewInfo(name string) *FileInfo { + info := &FileInfo{ + Name: name, + } + + extension := strings.ToLower(filepath.Ext(name)) + info.MimeType = mime.TypeByExtension(extension) + + if extension != "" && extension[0] == '.' 
{ + // The client expects a file extension without the leading period + info.Extension = extension[1:] + } else { + info.Extension = extension + } + + return info +} + +func GenerateMiniPreviewImage(img image.Image) *[]byte { + preview := imaging.Resize(img, 16, 16, imaging.Lanczos) + + buf := new(bytes.Buffer) + + if err := jpeg.Encode(buf, preview, &jpeg.Options{Quality: 90}); err != nil { + mlog.Info("Unable to encode image as mini preview jpg", mlog.Err(err)) + return nil + } + data := buf.Bytes() + return &data +} + +func GetInfoForBytes(name string, data io.ReadSeeker, size int) (*FileInfo, *AppError) { + info := &FileInfo{ + Name: name, + Size: int64(size), + } + var err *AppError + + extension := strings.ToLower(filepath.Ext(name)) + info.MimeType = mime.TypeByExtension(extension) + + if extension != "" && extension[0] == '.' { + // The client expects a file extension without the leading period + info.Extension = extension[1:] + } else { + info.Extension = extension + } + + if info.IsImage() { + // Only set the width and height if it's actually an image that we can understand + if config, _, err := image.DecodeConfig(data); err == nil { + info.Width = config.Width + info.Height = config.Height + + if info.MimeType == "image/gif" { + // Just show the gif itself instead of a preview image for animated gifs + data.Seek(0, io.SeekStart) + gifConfig, err := gif.DecodeAll(data) + if err != nil { + // Still return the rest of the info even though it doesn't appear to be an actual gif + info.HasPreviewImage = true + return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, err.Error(), http.StatusBadRequest) + } + info.HasPreviewImage = len(gifConfig.Image) == 1 + } else { + info.HasPreviewImage = true + } + } + } + + return info, err +} + +func GetEtagForFileInfos(infos []*FileInfo) string { + if len(infos) == 0 { + return Etag() + } + + var maxUpdateAt int64 + + for _, info := range infos { + if info.UpdateAt > maxUpdateAt { + maxUpdateAt = info.UpdateAt + } + } + + return Etag(infos[0].PostId, maxUpdateAt) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info_list.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info_list.go new file mode 100644 index 0000000..cd9694f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info_list.go @@ -0,0 +1,128 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
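The mini-preview generation added in file_info.go above (GenerateMiniPreviewImage) can be exercised on its own. The sketch below is illustrative only: it builds a synthetic image in memory instead of reading a real upload, and relies on the disintegration/imaging dependency vendored by this patch.

package main

import (
	"bytes"
	"fmt"
	"image"
	"image/color"
	"image/jpeg"

	"github.com/disintegration/imaging"
)

func main() {
	// Build a trivial in-memory gradient instead of decoding a real file.
	src := image.NewRGBA(image.Rect(0, 0, 640, 480))
	for x := 0; x < 640; x++ {
		for y := 0; y < 480; y++ {
			src.Set(x, y, color.RGBA{R: uint8(x % 256), G: uint8(y % 256), B: 128, A: 255})
		}
	}

	// 16x16 thumbnail with Lanczos resampling, as in the vendored helper.
	preview := imaging.Resize(src, 16, 16, imaging.Lanczos)

	buf := new(bytes.Buffer)
	if err := jpeg.Encode(buf, preview, &jpeg.Options{Quality: 90}); err != nil {
		fmt.Println("encode failed:", err)
		return
	}
	fmt.Printf("mini preview is %d bytes\n", buf.Len())
}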
+ +package model + +import ( + "encoding/json" + "io" + "sort" +) + +type FileInfoList struct { + Order []string `json:"order"` + FileInfos map[string]*FileInfo `json:"file_infos"` + NextFileInfoId string `json:"next_file_info_id"` + PrevFileInfoId string `json:"prev_file_info_id"` +} + +func NewFileInfoList() *FileInfoList { + return &FileInfoList{ + Order: make([]string, 0), + FileInfos: make(map[string]*FileInfo), + NextFileInfoId: "", + PrevFileInfoId: "", + } +} + +func (o *FileInfoList) ToSlice() []*FileInfo { + var fileInfos []*FileInfo + for _, id := range o.Order { + fileInfos = append(fileInfos, o.FileInfos[id]) + } + return fileInfos +} + +func (o *FileInfoList) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *FileInfoList) MakeNonNil() { + if o.Order == nil { + o.Order = make([]string, 0) + } + + if o.FileInfos == nil { + o.FileInfos = make(map[string]*FileInfo) + } +} + +func (o *FileInfoList) AddOrder(id string) { + if o.Order == nil { + o.Order = make([]string, 0, 128) + } + + o.Order = append(o.Order, id) +} + +func (o *FileInfoList) AddFileInfo(fileInfo *FileInfo) { + if o.FileInfos == nil { + o.FileInfos = make(map[string]*FileInfo) + } + + o.FileInfos[fileInfo.Id] = fileInfo +} + +func (o *FileInfoList) UniqueOrder() { + keys := make(map[string]bool) + order := []string{} + for _, fileInfoId := range o.Order { + if _, value := keys[fileInfoId]; !value { + keys[fileInfoId] = true + order = append(order, fileInfoId) + } + } + + o.Order = order +} + +func (o *FileInfoList) Extend(other *FileInfoList) { + for fileInfoId := range other.FileInfos { + o.AddFileInfo(other.FileInfos[fileInfoId]) + } + + for _, fileInfoId := range other.Order { + o.AddOrder(fileInfoId) + } + + o.UniqueOrder() +} + +func (o *FileInfoList) SortByCreateAt() { + sort.Slice(o.Order, func(i, j int) bool { + return o.FileInfos[o.Order[i]].CreateAt > o.FileInfos[o.Order[j]].CreateAt + }) +} + +func (o *FileInfoList) Etag() string { + id := "0" + var t int64 = 0 + + for _, v := range o.FileInfos { + if v.UpdateAt > t { + t = v.UpdateAt + id = v.Id + } else if v.UpdateAt == t && v.Id > id { + t = v.UpdateAt + id = v.Id + } + } + + orderId := "" + if len(o.Order) > 0 { + orderId = o.Order[0] + } + + return Etag(orderId, id, t) +} + +func FileInfoListFromJson(data io.Reader) *FileInfoList { + var o *FileInfoList + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info_search_results.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info_search_results.go new file mode 100644 index 0000000..90f2922 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info_search_results.go @@ -0,0 +1,37 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
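The FileInfoList helpers above can be used roughly as follows. This is a hypothetical usage sketch that assumes the v5 module path introduced by this patch; it only calls functions visible in the diff (NewFileInfoList, AddFileInfo, AddOrder, SortByCreateAt).

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	list := model.NewFileInfoList()

	older := &model.FileInfo{Id: model.NewId(), Name: "a.png", CreateAt: 1000}
	newer := &model.FileInfo{Id: model.NewId(), Name: "b.png", CreateAt: 2000}

	for _, fi := range []*model.FileInfo{older, newer} {
		list.AddFileInfo(fi)
		list.AddOrder(fi.Id)
	}

	// SortByCreateAt orders newest-first, mirroring the comparison above.
	list.SortByCreateAt()
	fmt.Println(list.FileInfos[list.Order[0]].Name) // b.png
}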
+ +package model + +import ( + "encoding/json" + "io" +) + +type FileInfoSearchMatches map[string][]string + +type FileInfoSearchResults struct { + *FileInfoList + Matches FileInfoSearchMatches `json:"matches"` +} + +func MakeFileInfoSearchResults(fileInfos *FileInfoList, matches FileInfoSearchMatches) *FileInfoSearchResults { + return &FileInfoSearchResults{ + fileInfos, + matches, + } +} + +func (o *FileInfoSearchResults) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } + return string(b) +} + +func FileInfoSearchResultsFromJson(data io.Reader) *FileInfoSearchResults { + var o *FileInfoSearchResults + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/gitlab.go b/vendor/github.com/mattermost/mattermost-server/v5/model/gitlab.go similarity index 74% rename from vendor/github.com/mattermost/mattermost-server/model/gitlab.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/gitlab.go index 8777614..0b069cd 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/gitlab.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/gitlab.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go b/vendor/github.com/mattermost/mattermost-server/v5/model/group.go new file mode 100644 index 0000000..49783c8 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/group.go @@ -0,0 +1,221 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" + "regexp" +) + +const ( + GroupSourceLdap GroupSource = "ldap" + + GroupNameMaxLength = 64 + GroupSourceMaxLength = 64 + GroupDisplayNameMaxLength = 128 + GroupDescriptionMaxLength = 1024 + GroupRemoteIDMaxLength = 48 +) + +type GroupSource string + +var allGroupSources = []GroupSource{ + GroupSourceLdap, +} + +var groupSourcesRequiringRemoteID = []GroupSource{ + GroupSourceLdap, +} + +type Group struct { + Id string `json:"id"` + Name *string `json:"name,omitempty"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + Source GroupSource `json:"source"` + RemoteId string `json:"remote_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + HasSyncables bool `db:"-" json:"has_syncables"` + MemberCount *int `db:"-" json:"member_count,omitempty"` + AllowReference bool `json:"allow_reference"` +} + +type GroupWithSchemeAdmin struct { + Group + SchemeAdmin *bool `db:"SyncableSchemeAdmin" json:"scheme_admin,omitempty"` +} + +type GroupsAssociatedToChannelWithSchemeAdmin struct { + ChannelId string `json:"channel_id"` + Group + SchemeAdmin *bool `db:"SyncableSchemeAdmin" json:"scheme_admin,omitempty"` +} +type GroupsAssociatedToChannel struct { + ChannelId string `json:"channel_id"` + Groups []*GroupWithSchemeAdmin `json:"groups"` +} + +type GroupPatch struct { + Name *string `json:"name"` + DisplayName *string `json:"display_name"` + Description *string `json:"description"` + AllowReference *bool `json:"allow_reference"` +} + +type LdapGroupSearchOpts struct { + Q string + IsLinked *bool + IsConfigured *bool +} + +type GroupSearchOpts struct { + Q string + NotAssociatedToTeam string + 
NotAssociatedToChannel string + IncludeMemberCount bool + FilterAllowReference bool + PageOpts *PageOpts + Since int64 + + // FilterParentTeamPermitted filters the groups to the intersect of the + // set associated to the parent team and those returned by the query. + // If the parent team is not group-constrained or if NotAssociatedToChannel + // is not set then this option is ignored. + FilterParentTeamPermitted bool +} + +type PageOpts struct { + Page int + PerPage int +} + +type GroupStats struct { + GroupID string `json:"group_id"` + TotalMemberCount int64 `json:"total_member_count"` +} + +func (group *Group) Patch(patch *GroupPatch) { + if patch.Name != nil { + group.Name = patch.Name + } + if patch.DisplayName != nil { + group.DisplayName = *patch.DisplayName + } + if patch.Description != nil { + group.Description = *patch.Description + } + if patch.AllowReference != nil { + group.AllowReference = *patch.AllowReference + } +} + +func (group *Group) IsValidForCreate() *AppError { + err := group.IsValidName() + if err != nil { + return err + } + + if l := len(group.DisplayName); l == 0 || l > GroupDisplayNameMaxLength { + return NewAppError("Group.IsValidForCreate", "model.group.display_name.app_error", map[string]interface{}{"GroupDisplayNameMaxLength": GroupDisplayNameMaxLength}, "", http.StatusBadRequest) + } + + if len(group.Description) > GroupDescriptionMaxLength { + return NewAppError("Group.IsValidForCreate", "model.group.description.app_error", map[string]interface{}{"GroupDescriptionMaxLength": GroupDescriptionMaxLength}, "", http.StatusBadRequest) + } + + isValidSource := false + for _, groupSource := range allGroupSources { + if group.Source == groupSource { + isValidSource = true + break + } + } + if !isValidSource { + return NewAppError("Group.IsValidForCreate", "model.group.source.app_error", nil, "", http.StatusBadRequest) + } + + if len(group.RemoteId) > GroupRemoteIDMaxLength || (len(group.RemoteId) == 0 && group.requiresRemoteId()) { + return NewAppError("Group.IsValidForCreate", "model.group.remote_id.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (group *Group) requiresRemoteId() bool { + for _, groupSource := range groupSourcesRequiringRemoteID { + if groupSource == group.Source { + return true + } + } + return false +} + +func (group *Group) IsValidForUpdate() *AppError { + if !IsValidId(group.Id) { + return NewAppError("Group.IsValidForUpdate", "app.group.id.app_error", nil, "", http.StatusBadRequest) + } + if group.CreateAt == 0 { + return NewAppError("Group.IsValidForUpdate", "model.group.create_at.app_error", nil, "", http.StatusBadRequest) + } + if group.UpdateAt == 0 { + return NewAppError("Group.IsValidForUpdate", "model.group.update_at.app_error", nil, "", http.StatusBadRequest) + } + if err := group.IsValidForCreate(); err != nil { + return err + } + return nil +} + +func (group *Group) ToJson() string { + b, _ := json.Marshal(group) + return string(b) +} + +var validGroupnameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`) + +func (group *Group) IsValidName() *AppError { + + if group.Name == nil { + if group.AllowReference { + return NewAppError("Group.IsValidName", "model.group.name.app_error", map[string]interface{}{"GroupNameMaxLength": GroupNameMaxLength}, "", http.StatusBadRequest) + } + } else { + if l := len(*group.Name); l == 0 || l > GroupNameMaxLength { + return NewAppError("Group.IsValidName", "model.group.name.invalid_length.app_error", map[string]interface{}{"GroupNameMaxLength": GroupNameMaxLength}, "", 
http.StatusBadRequest) + } + + if !validGroupnameChars.MatchString(*group.Name) { + return NewAppError("Group.IsValidName", "model.group.name.invalid_chars.app_error", nil, "", http.StatusBadRequest) + } + } + return nil +} + +func GroupFromJson(data io.Reader) *Group { + var group *Group + json.NewDecoder(data).Decode(&group) + return group +} + +func GroupsFromJson(data io.Reader) []*Group { + var groups []*Group + json.NewDecoder(data).Decode(&groups) + return groups +} + +func GroupPatchFromJson(data io.Reader) *GroupPatch { + var groupPatch *GroupPatch + json.NewDecoder(data).Decode(&groupPatch) + return groupPatch +} + +func GroupStatsFromJson(data io.Reader) *GroupStats { + var groupStats *GroupStats + json.NewDecoder(data).Decode(&groupStats) + return groupStats +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/group_member.go b/vendor/github.com/mattermost/mattermost-server/v5/model/group_member.go similarity index 83% rename from vendor/github.com/mattermost/mattermost-server/model/group_member.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/group_member.go index 0f1a0ba..d18d784 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/group_member.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/group_member.go @@ -1,5 +1,5 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/group_syncable.go b/vendor/github.com/mattermost/mattermost-server/v5/model/group_syncable.go similarity index 88% rename from vendor/github.com/mattermost/mattermost-server/model/group_syncable.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/group_syncable.go index 038164f..6a4d402 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/group_syncable.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/group_syncable.go @@ -1,5 +1,5 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -29,11 +29,12 @@ type GroupSyncable struct { // TeamId. 
SyncableId string `db:"-" json:"-"` - AutoAdd bool `json:"auto_add"` - CreateAt int64 `json:"create_at"` - DeleteAt int64 `json:"delete_at"` - UpdateAt int64 `json:"update_at"` - Type GroupSyncableType `db:"-" json:"-"` + AutoAdd bool `json:"auto_add"` + SchemeAdmin bool `json:"scheme_admin"` + CreateAt int64 `json:"create_at"` + DeleteAt int64 `json:"delete_at"` + UpdateAt int64 `json:"update_at"` + Type GroupSyncableType `db:"-" json:"-"` // Values joined in from the associated team and/or channel ChannelDisplayName string `db:"-" json:"-"` @@ -123,13 +124,17 @@ func (syncable *GroupSyncable) MarshalJSON() ([]byte, error) { } type GroupSyncablePatch struct { - AutoAdd *bool `json:"auto_add"` + AutoAdd *bool `json:"auto_add"` + SchemeAdmin *bool `json:"scheme_admin"` } func (syncable *GroupSyncable) Patch(patch *GroupSyncablePatch) { if patch.AutoAdd != nil { syncable.AutoAdd = *patch.AutoAdd } + if patch.SchemeAdmin != nil { + syncable.SchemeAdmin = *patch.SchemeAdmin + } } type UserTeamIDPair struct { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go b/vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go new file mode 100644 index 0000000..3cdd489 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/guest_invite.go @@ -0,0 +1,53 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" +) + +type GuestsInvite struct { + Emails []string `json:"emails"` + Channels []string `json:"channels"` + Message string `json:"message"` +} + +// IsValid validates the user and returns an error if it isn't configured +// correctly. +func (i *GuestsInvite) IsValid() *AppError { + if len(i.Emails) == 0 { + return NewAppError("GuestsInvite.IsValid", "model.guest.is_valid.emails.app_error", nil, "", http.StatusBadRequest) + } + + for _, email := range i.Emails { + if len(email) > USER_EMAIL_MAX_LENGTH || len(email) == 0 || !IsValidEmail(email) { + return NewAppError("GuestsInvite.IsValid", "model.guest.is_valid.email.app_error", nil, "email="+email, http.StatusBadRequest) + } + } + + if len(i.Channels) == 0 { + return NewAppError("GuestsInvite.IsValid", "model.guest.is_valid.channels.app_error", nil, "", http.StatusBadRequest) + } + + for _, channel := range i.Channels { + if len(channel) != 26 { + return NewAppError("GuestsInvite.IsValid", "model.guest.is_valid.channel.app_error", nil, "channel="+channel, http.StatusBadRequest) + } + } + return nil +} + +// GuestsInviteFromJson will decode the input and return a GuestsInvite +func GuestsInviteFromJson(data io.Reader) *GuestsInvite { + var i *GuestsInvite + json.NewDecoder(data).Decode(&i) + return i +} + +func (i *GuestsInvite) ToJson() string { + b, _ := json.Marshal(i) + return string(b) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go b/vendor/github.com/mattermost/mattermost-server/v5/model/incoming_webhook.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/incoming_webhook.go index 8806a73..f8fffe2 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/incoming_webhook.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
+// See LICENSE.txt for license information. package model @@ -65,7 +65,7 @@ func IncomingWebhookListFromJson(data io.Reader) []*IncomingWebhook { func (o *IncomingWebhook) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.id.app_error", nil, "", http.StatusBadRequest) } @@ -78,15 +78,15 @@ func (o *IncomingWebhook) IsValid() *AppError { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.UserId) != 26 { + if !IsValidId(o.UserId) { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ChannelId) != 26 { + if !IsValidId(o.ChannelId) { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.channel_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.TeamId) != 26 { + if !IsValidId(o.TeamId) { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.team_id.app_error", nil, "", http.StatusBadRequest) } @@ -182,9 +182,8 @@ func decodeIncomingWebhookRequest(by []byte) (*IncomingWebhookRequest, error) { err := decoder.Decode(&o) if err == nil { return &o, nil - } else { - return nil, err } + return nil, err } func IncomingWebhookRequestFromJson(data io.Reader) (*IncomingWebhookRequest, *AppError) { @@ -211,7 +210,6 @@ func (o *IncomingWebhookRequest) ToJson() string { b, err := json.Marshal(o) if err != nil { return "" - } else { - return string(b) } + return string(b) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/initial_load.go b/vendor/github.com/mattermost/mattermost-server/v5/model/initial_load.go similarity index 67% rename from vendor/github.com/mattermost/mattermost-server/model/initial_load.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/initial_load.go index 3be6804..c533faa 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/initial_load.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/initial_load.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -18,13 +18,13 @@ type InitialLoad struct { NoAccounts bool `json:"no_accounts"` } -func (me *InitialLoad) ToJson() string { - b, _ := json.Marshal(me) +func (il *InitialLoad) ToJson() string { + b, _ := json.Marshal(il) return string(b) } func InitialLoadFromJson(data io.Reader) *InitialLoad { - var o *InitialLoad - json.NewDecoder(data).Decode(&o) - return o + var il *InitialLoad + json.NewDecoder(data).Decode(&il) + return il } diff --git a/vendor/github.com/mattermost/mattermost-server/model/integration_action.go b/vendor/github.com/mattermost/mattermost-server/v5/model/integration_action.go similarity index 84% rename from vendor/github.com/mattermost/mattermost-server/model/integration_action.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/integration_action.go index 4490e4f..7124a7e 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/integration_action.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/integration_action.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -16,6 +16,7 @@ import ( "io" "math/big" "net/http" + "reflect" "strconv" "strings" ) @@ -44,13 +45,20 @@ type PostAction struct { // The text on the button, or in the select placeholder. Name string `json:"name,omitempty"` + // If the action is disabled. + Disabled bool `json:"disabled,omitempty"` + + // Style defines a text and border style. + // Supported values are "default", "primary", "success", "good", "warning", "danger" + // and any hex color. + Style string `json:"style,omitempty"` + // DataSource indicates the data source for the select action. If left // empty, the select is populated from Options. Other supported values // are "users" and "channels". DataSource string `json:"data_source,omitempty"` - // Options contains either the buttons that will be displayed on the post - // or the values listed in a select dropdowon on the post. + // Options contains the values listed in a select dropdown on the post. Options []*PostActionOptions `json:"options,omitempty"` // DefaultOption contains the option, if any, that will appear as the @@ -117,13 +125,19 @@ func (p *PostAction) Equals(input *PostAction) bool { for key, value := range p.Integration.Context { inputValue, ok := input.Integration.Context[key] - if !ok { return false } - if value != inputValue { - return false + switch inputValue.(type) { + case string, bool, int, float64: + if value != inputValue { + return false + } + default: + if !reflect.DeepEqual(value, inputValue) { + return false + } } } @@ -157,19 +171,23 @@ type PostActionIntegration struct { } type PostActionIntegrationRequest struct { - UserId string `json:"user_id"` - ChannelId string `json:"channel_id"` - TeamId string `json:"team_id"` - PostId string `json:"post_id"` - TriggerId string `json:"trigger_id"` - Type string `json:"type"` - DataSource string `json:"data_source"` - Context map[string]interface{} `json:"context,omitempty"` + UserId string `json:"user_id"` + UserName string `json:"user_name"` + ChannelId string `json:"channel_id"` + ChannelName string `json:"channel_name"` + TeamId string `json:"team_id"` + TeamName string `json:"team_domain"` + PostId string `json:"post_id"` + TriggerId string `json:"trigger_id"` + Type string `json:"type"` + DataSource string `json:"data_source"` + Context map[string]interface{} `json:"context,omitempty"` } type PostActionIntegrationResponse struct { - Update *Post `json:"update"` - EphemeralText string `json:"ephemeral_text"` + Update *Post `json:"update"` + EphemeralText string `json:"ephemeral_text"` + SkipSlackParsing bool `json:"skip_slack_parsing"` // Set to `true` to skip the Slack-compatibility handling of Text. 
} type PostActionAPIResponse struct { @@ -178,13 +196,14 @@ type PostActionAPIResponse struct { } type Dialog struct { - CallbackId string `json:"callback_id"` - Title string `json:"title"` - IconURL string `json:"icon_url"` - Elements []DialogElement `json:"elements"` - SubmitLabel string `json:"submit_label"` - NotifyOnCancel bool `json:"notify_on_cancel"` - State string `json:"state"` + CallbackId string `json:"callback_id"` + Title string `json:"title"` + IntroductionText string `json:"introduction_text"` + IconURL string `json:"icon_url"` + Elements []DialogElement `json:"elements"` + SubmitLabel string `json:"submit_label"` + NotifyOnCancel bool `json:"notify_on_cancel"` + State string `json:"state"` } type DialogElement struct { @@ -221,6 +240,7 @@ type SubmitDialogRequest struct { } type SubmitDialogResponse struct { + Error string `json:"error,omitempty"` Errors map[string]string `json:"errors,omitempty"` } @@ -282,7 +302,7 @@ func DecodeAndVerifyTriggerId(triggerId string, s *ecdsa.PrivateKey) (string, st R, S *big.Int } - if _, err := asn1.Unmarshal([]byte(signature), &esig); err != nil { + if _, err := asn1.Unmarshal(signature, &esig); err != nil { return "", "", NewAppError("DecodeAndVerifyTriggerId", "interactive_message.decode_trigger_id.signature_decode_failed", nil, err.Error(), http.StatusBadRequest) } @@ -361,8 +381,8 @@ func (r *SubmitDialogResponse) ToJson() []byte { func (o *Post) StripActionIntegrations() { attachments := o.Attachments() - if o.Props["attachments"] != nil { - o.Props["attachments"] = attachments + if o.GetProp("attachments") != nil { + o.AddProp("attachments", attachments) } for _, attachment := range attachments { for _, action := range attachment.Actions { @@ -374,7 +394,7 @@ func (o *Post) StripActionIntegrations() { func (o *Post) GetAction(id string) *PostAction { for _, attachment := range o.Attachments() { for _, action := range attachment.Actions { - if action.Id == id { + if action != nil && action.Id == id { return action } } @@ -383,13 +403,13 @@ func (o *Post) GetAction(id string) *PostAction { } func (o *Post) GenerateActionIds() { - if o.Props["attachments"] != nil { - o.Props["attachments"] = o.Attachments() + if o.GetProp("attachments") != nil { + o.AddProp("attachments", o.Attachments()) } - if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok { + if attachments, ok := o.GetProp("attachments").([]*SlackAttachment); ok { for _, attachment := range attachments { for _, action := range attachment.Actions { - if action.Id == "" { + if action != nil && action.Id == "" { action.Id = NewId() } } @@ -404,7 +424,7 @@ func AddPostActionCookies(o *Post, secret []byte) *Post { retainProps := map[string]interface{}{} removeProps := []string{} for _, key := range PostActionRetainPropKeys { - value, ok := p.Props[key] + value, ok := p.GetProps()[key] if ok { retainProps[key] = value } else { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go b/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go new file mode 100644 index 0000000..744ad07 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "encoding/json" + "errors" +) + +type OrphanedRecord struct { + ParentId *string `json:"parent_id"` + ChildId *string `json:"child_id"` +} + +type RelationalIntegrityCheckData struct { + ParentName string `json:"parent_name"` + ChildName string `json:"child_name"` + ParentIdAttr string `json:"parent_id_attr"` + ChildIdAttr string `json:"child_id_attr"` + Records []OrphanedRecord `json:"records"` +} + +type IntegrityCheckResult struct { + Data interface{} `json:"data"` + Err error `json:"err"` +} + +func (r *IntegrityCheckResult) UnmarshalJSON(b []byte) error { + var data map[string]interface{} + if err := json.Unmarshal(b, &data); err != nil { + return err + } + if d, ok := data["data"]; ok && d != nil { + var rdata RelationalIntegrityCheckData + m := d.(map[string]interface{}) + rdata.ParentName = m["parent_name"].(string) + rdata.ChildName = m["child_name"].(string) + rdata.ParentIdAttr = m["parent_id_attr"].(string) + rdata.ChildIdAttr = m["child_id_attr"].(string) + for _, recData := range m["records"].([]interface{}) { + var record OrphanedRecord + m := recData.(map[string]interface{}) + if val := m["parent_id"]; val != nil { + record.ParentId = NewString(val.(string)) + } + if val := m["child_id"]; val != nil { + record.ChildId = NewString(val.(string)) + } + rdata.Records = append(rdata.Records, record) + } + r.Data = rdata + } + if err, ok := data["err"]; ok && err != nil { + r.Err = errors.New(data["err"].(string)) + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/job.go b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go similarity index 77% rename from vendor/github.com/mattermost/mattermost-server/model/job.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/job.go index 2533ea7..0c006ea 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/job.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -15,9 +15,15 @@ const ( JOB_TYPE_MESSAGE_EXPORT = "message_export" JOB_TYPE_ELASTICSEARCH_POST_INDEXING = "elasticsearch_post_indexing" JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION = "elasticsearch_post_aggregation" + JOB_TYPE_BLEVE_POST_INDEXING = "bleve_post_indexing" JOB_TYPE_LDAP_SYNC = "ldap_sync" JOB_TYPE_MIGRATIONS = "migrations" JOB_TYPE_PLUGINS = "plugins" + JOB_TYPE_EXPIRY_NOTIFY = "expiry_notify" + JOB_TYPE_PRODUCT_NOTICES = "product_notices" + JOB_TYPE_ACTIVE_USERS = "active_users" + JOB_TYPE_IMPORT_PROCESS = "import_process" + JOB_TYPE_CLOUD = "cloud" JOB_STATUS_PENDING = "pending" JOB_STATUS_IN_PROGRESS = "in_progress" @@ -25,6 +31,7 @@ const ( JOB_STATUS_ERROR = "error" JOB_STATUS_CANCEL_REQUESTED = "cancel_requested" JOB_STATUS_CANCELED = "canceled" + JOB_STATUS_WARNING = "warning" ) type Job struct { @@ -40,7 +47,7 @@ type Job struct { } func (j *Job) IsValid() *AppError { - if len(j.Id) != 26 { + if !IsValidId(j.Id) { return NewAppError("Job.IsValid", "model.job.is_valid.id.app_error", nil, "id="+j.Id, http.StatusBadRequest) } @@ -52,10 +59,16 @@ func (j *Job) IsValid() *AppError { case JOB_TYPE_DATA_RETENTION: case JOB_TYPE_ELASTICSEARCH_POST_INDEXING: case JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION: + case JOB_TYPE_BLEVE_POST_INDEXING: case JOB_TYPE_LDAP_SYNC: case JOB_TYPE_MESSAGE_EXPORT: case JOB_TYPE_MIGRATIONS: case JOB_TYPE_PLUGINS: + case JOB_TYPE_PRODUCT_NOTICES: + case JOB_TYPE_EXPIRY_NOTIFY: + case JOB_TYPE_ACTIVE_USERS: + case JOB_TYPE_IMPORT_PROCESS: + case JOB_TYPE_CLOUD: default: return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest) } @@ -74,8 +87,8 @@ func (j *Job) IsValid() *AppError { return nil } -func (js *Job) ToJson() string { - b, _ := json.Marshal(js) +func (j *Job) ToJson() string { + b, _ := json.Marshal(j) return string(b) } @@ -83,9 +96,8 @@ func JobFromJson(data io.Reader) *Job { var job Job if err := json.NewDecoder(data).Decode(&job); err == nil { return &job - } else { - return nil } + return nil } func JobsToJson(jobs []*Job) string { @@ -97,13 +109,12 @@ func JobsFromJson(data io.Reader) []*Job { var jobs []*Job if err := json.NewDecoder(data).Decode(&jobs); err == nil { return jobs - } else { - return nil } + return nil } -func (js *Job) DataToJson() string { - b, _ := json.Marshal(js.Data) +func (j *Job) DataToJson() string { + b, _ := json.Marshal(j.Data) return string(b) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go new file mode 100644 index 0000000..1262dfb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go @@ -0,0 +1,10 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +const ( + USER_AUTH_SERVICE_LDAP = "ldap" + LDAP_PUBLIC_CERTIFICATE_NAME = "ldap-public.crt" + LDAP_PRIVATE_KEY_NAME = "ldap-private.key" +) diff --git a/vendor/github.com/mattermost/mattermost-server/model/license.go b/vendor/github.com/mattermost/mattermost-server/v5/model/license.go similarity index 65% rename from vendor/github.com/mattermost/mattermost-server/model/license.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/license.go index 7ab21de..1b324ad 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/license.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/license.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -12,6 +12,8 @@ import ( const ( EXPIRED_LICENSE_ERROR = "api.license.add_license.expired.app_error" INVALID_LICENSE_ERROR = "api.license.add_license.invalid.app_error" + LICENSE_GRACE_PERIOD = 1000 * 60 * 60 * 24 * 10 //10 days + LICENSE_RENEWAL_LINK = "https://mattermost.com/renew/" ) type LicenseRecord struct { @@ -32,11 +34,26 @@ type License struct { } type Customer struct { - Id string `json:"id"` - Name string `json:"name"` - Email string `json:"email"` - Company string `json:"company"` - PhoneNumber string `json:"phone_number"` + Id string `json:"id"` + Name string `json:"name"` + Email string `json:"email"` + Company string `json:"company"` +} + +type TrialLicenseRequest struct { + ServerID string `json:"server_id"` + Email string `json:"email"` + Name string `json:"name"` + SiteURL string `json:"site_url"` + SiteName string `json:"site_name"` + Users int `json:"users"` + TermsAccepted bool `json:"terms_accepted"` + ReceiveEmailsAccepted bool `json:"receive_emails_accepted"` +} + +func (tlr *TrialLicenseRequest) ToJson() string { + b, _ := json.Marshal(tlr) + return string(b) } type Features struct { @@ -46,6 +63,7 @@ type Features struct { MFA *bool `json:"mfa"` GoogleOAuth *bool `json:"google_oauth"` Office365OAuth *bool `json:"office365_oauth"` + OpenId *bool `json:"openid"` Compliance *bool `json:"compliance"` Cluster *bool `json:"cluster"` Metrics *bool `json:"metrics"` @@ -59,6 +77,15 @@ type Features struct { MessageExport *bool `json:"message_export"` CustomPermissionsSchemes *bool `json:"custom_permissions_schemes"` CustomTermsOfService *bool `json:"custom_terms_of_service"` + GuestAccounts *bool `json:"guest_accounts"` + GuestAccountsPermissions *bool `json:"guest_accounts_permissions"` + IDLoadedPushNotifications *bool `json:"id_loaded"` + LockTeammateNameDisplay *bool `json:"lock_teammate_name_display"` + EnterprisePlugins *bool `json:"enterprise_plugins"` + AdvancedLogging *bool `json:"advanced_logging"` + Cloud *bool `json:"cloud"` + SharedChannels *bool `json:"shared_channels"` + RemoteClusterService *bool `json:"remote_cluster_service"` // after we enabled more features we'll need to control them with this FutureFeatures *bool `json:"future_features"` @@ -71,6 +98,7 @@ func (f *Features) ToMap() map[string]interface{} { "mfa": *f.MFA, "google": *f.GoogleOAuth, "office365": *f.Office365OAuth, + "openid": *f.OpenId, "compliance": *f.Compliance, "cluster": *f.Cluster, "metrics": *f.Metrics, @@ -81,6 +109,15 @@ func (f *Features) ToMap() map[string]interface{} { "data_retention": *f.DataRetention, "message_export": *f.MessageExport, "custom_permissions_schemes": 
*f.CustomPermissionsSchemes, + "guest_accounts": *f.GuestAccounts, + "guest_accounts_permissions": *f.GuestAccountsPermissions, + "id_loaded": *f.IDLoadedPushNotifications, + "lock_teammate_name_display": *f.LockTeammateNameDisplay, + "enterprise_plugins": *f.EnterprisePlugins, + "advanced_logging": *f.AdvancedLogging, + "cloud": *f.Cloud, + "shared_channels": *f.SharedChannels, + "remote_cluster_service": *f.RemoteClusterService, "future": *f.FutureFeatures, } } @@ -114,6 +151,10 @@ func (f *Features) SetDefaults() { f.Office365OAuth = NewBool(*f.FutureFeatures) } + if f.OpenId == nil { + f.OpenId = NewBool(*f.FutureFeatures) + } + if f.Compliance == nil { f.Compliance = NewBool(*f.FutureFeatures) } @@ -162,15 +203,56 @@ func (f *Features) SetDefaults() { f.CustomPermissionsSchemes = NewBool(*f.FutureFeatures) } + if f.GuestAccounts == nil { + f.GuestAccounts = NewBool(*f.FutureFeatures) + } + + if f.GuestAccountsPermissions == nil { + f.GuestAccountsPermissions = NewBool(*f.FutureFeatures) + } + if f.CustomTermsOfService == nil { f.CustomTermsOfService = NewBool(*f.FutureFeatures) } + + if f.IDLoadedPushNotifications == nil { + f.IDLoadedPushNotifications = NewBool(*f.FutureFeatures) + } + + if f.LockTeammateNameDisplay == nil { + f.LockTeammateNameDisplay = NewBool(*f.FutureFeatures) + } + + if f.EnterprisePlugins == nil { + f.EnterprisePlugins = NewBool(*f.FutureFeatures) + } + + if f.AdvancedLogging == nil { + f.AdvancedLogging = NewBool(*f.FutureFeatures) + } + + if f.Cloud == nil { + f.Cloud = NewBool(false) + } + + if f.SharedChannels == nil { + f.SharedChannels = NewBool(*f.FutureFeatures) + } + + if f.RemoteClusterService == nil { + f.RemoteClusterService = f.SharedChannels + } } func (l *License) IsExpired() bool { return l.ExpiresAt < GetMillis() } +func (l *License) IsPastGracePeriod() bool { + timeDiff := GetMillis() - l.ExpiresAt + return timeDiff > LICENSE_GRACE_PERIOD +} + func (l *License) IsStarted() bool { return l.StartsAt < GetMillis() } @@ -206,7 +288,7 @@ func LicenseFromJson(data io.Reader) *License { } func (lr *LicenseRecord) IsValid() *AppError { - if len(lr.Id) != 26 { + if !IsValidId(lr.Id) { return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/link_metadata.go b/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/model/link_metadata.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go index 72f0178..6c3e0bd 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/link_metadata.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -56,9 +56,7 @@ func firstNImages(images []*opengraph.Image, maxImages int) []*opengraph.Image { } numImages := len(images) if numImages > maxImages { - subImages := make([]*opengraph.Image, maxImages) - subImages = images[0:maxImages] - return subImages + return images[0:maxImages] } return images } @@ -173,9 +171,9 @@ func (o *LinkMetadata) DeserializeDataToConcreteType() error { // FloorToNearestHour takes a timestamp (in milliseconds) and returns it rounded to the previous hour in UTC. 
func FloorToNearestHour(ms int64) int64 { - t := time.Unix(0, ms*int64(1000*1000)) + t := time.Unix(0, ms*int64(1000*1000)).UTC() - return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location()).UnixNano() / int64(time.Millisecond) + return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC).UnixNano() / int64(time.Millisecond) } // isRoundedToNearestHour returns true if the given timestamp (in milliseconds) has been rounded to the nearest hour in UTC. diff --git a/vendor/github.com/mattermost/mattermost-server/model/manifest.go b/vendor/github.com/mattermost/mattermost-server/v5/model/manifest.go similarity index 56% rename from vendor/github.com/mattermost/mattermost-server/model/manifest.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/manifest.go index 0eb6453..3fba089 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/manifest.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/manifest.go @@ -1,11 +1,10 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model import ( "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -14,6 +13,7 @@ import ( "strings" "github.com/blang/semver" + "github.com/pkg/errors" "gopkg.in/yaml.v2" ) @@ -25,6 +25,20 @@ type PluginOption struct { Value string `json:"value" yaml:"value"` } +type PluginSettingType int + +const ( + Bool PluginSettingType = iota + Dropdown + Generated + Radio + Text + LongText + Number + Username + Custom +) + type PluginSetting struct { // The key that the setting will be assigned to in the configuration file. Key string `json:"key" yaml:"key"` @@ -49,7 +63,11 @@ type PluginSetting struct { // // "longtext" will result in a multi line string that can be typed in manually. // + // "number" will result in in integer setting that can be typed in manually. + // // "username" will result in a text setting that will autocomplete to a username. + // + // "custom" will result in a custom defined setting and will load the custom component registered for the Web App System Console. Type string `json:"type" yaml:"type"` // The help text to display to the user. Supports Markdown formatting. @@ -58,7 +76,7 @@ type PluginSetting struct { // The help text to display alongside the "Regenerate" button for settings of the "generated" type. RegenerateHelpText string `json:"regenerate_help_text,omitempty" yaml:"regenerate_help_text,omitempty"` - // The placeholder to display for "text", "generated" and "username" types when blank. + // The placeholder to display for "generated", "text", "longtext", "number" and "username" types when blank. Placeholder string `json:"placeholder" yaml:"placeholder"` // The default value of the setting. @@ -91,6 +109,10 @@ type PluginSettingsSchema struct { // "id": "com.mycompany.myplugin", // "name": "My Plugin", // "description": "This is my plugin", +// "homepage_url": "https://example.com", +// "support_url": "https://example.com/support", +// "release_notes_url": "https://example.com/releases/v0.0.1", +// "icon_path": "assets/logo.svg", // "version": "0.1.0", // "min_server_version": "5.6.0", // "server": { @@ -125,11 +147,24 @@ type Manifest struct { Id string `json:"id" yaml:"id"` // The name to be displayed for the plugin. 
- Name string `json:"name,omitempty" yaml:"name,omitempty"` + Name string `json:"name" yaml:"name"` // A description of what your plugin is and does. Description string `json:"description,omitempty" yaml:"description,omitempty"` + // HomepageURL is an optional link to learn more about the plugin. + HomepageURL string `json:"homepage_url,omitempty" yaml:"homepage_url,omitempty"` + + // SupportURL is an optional URL where plugin issues can be reported. + SupportURL string `json:"support_url,omitempty" yaml:"support_url,omitempty"` + + // ReleaseNotesURL is an optional URL where a changelog for the release can be found. + ReleaseNotesURL string `json:"release_notes_url,omitempty" yaml:"release_notes_url,omitempty"` + + // A relative file path in the bundle that points to the plugins svg icon for use with the Plugin Marketplace. + // This should be relative to the root of your bundle and the location of the manifest file. Bitmap image formats are not supported. + IconPath string `json:"icon_path,omitempty" yaml:"icon_path,omitempty"` + // A version number for your plugin. Semantic versioning is recommended: http://semver.org Version string `json:"version" yaml:"version"` @@ -153,12 +188,29 @@ type Manifest struct { // Plugins can store any kind of data in Props to allow other plugins to use it. Props map[string]interface{} `json:"props,omitempty" yaml:"props,omitempty"` + + // RequiredConfig defines any required server configuration fields for the plugin to function properly. + // + // Use the plugin helpers CheckRequiredServerConfiguration method to enforce this. + RequiredConfig *Config `json:"required_configuration,omitempty" yaml:"required_configuration,omitempty"` } type ManifestServer struct { - // Executables are the paths to your executable binaries, specifying multiple entry points - // for different platforms when bundled together in a single plugin. - Executables *ManifestExecutables `json:"executables,omitempty" yaml:"executables,omitempty"` + // AllExecutables are the paths to your executable binaries, specifying multiple entry + // points for different platforms when bundled together in a single plugin. + AllExecutables map[string]string `json:"executables,omitempty" yaml:"executables,omitempty"` + + // Executables is a legacy field populated with a subset of supported platform executables. + // When unmarshalling, Executables is authoritative for the platform executable paths it + // contains, overriding any values in AllExecutables. When marshalling, AllExecutables + // is authoritative. + // + // Code duplication is avoided when (un)marshalling by leveraging type aliases in the + // various (Un)Marshal(JSON|YAML) methods, since aliases don't inherit the aliased type's + // methods. + // + // In v6.0, we should remove this field and rename AllExecutables back to Executables. + Executables *ManifestExecutables `json:"-" yaml:"-"` // Executable is the path to your executable binary. This should be relative to the root // of your bundle and the location of the manifest file. @@ -170,6 +222,79 @@ type ManifestServer struct { Executable string `json:"executable" yaml:"executable"` } +func (ms *ManifestServer) MarshalJSON() ([]byte, error) { + type auxManifestServer ManifestServer + + // Populate AllExecutables from Executables, if it exists. 
+ if ms.Executables != nil { + if ms.AllExecutables == nil { + ms.AllExecutables = make(map[string]string) + } + + ms.AllExecutables["linux-amd64"] = ms.Executables.LinuxAmd64 + ms.AllExecutables["darwin-amd64"] = ms.Executables.DarwinAmd64 + ms.AllExecutables["windows-amd64"] = ms.Executables.WindowsAmd64 + } + + return json.Marshal((*auxManifestServer)(ms)) +} + +func (ms *ManifestServer) UnmarshalJSON(data []byte) error { + type auxManifestServer ManifestServer + + aux := (*auxManifestServer)(ms) + if err := json.Unmarshal(data, aux); err != nil { + return err + } + + if len(aux.AllExecutables) > 0 { + ms.Executables = &ManifestExecutables{ + LinuxAmd64: aux.AllExecutables["linux-amd64"], + DarwinAmd64: aux.AllExecutables["darwin-amd64"], + WindowsAmd64: aux.AllExecutables["windows-amd64"], + } + } + + return nil +} + +func (ms *ManifestServer) MarshalYAML() ([]byte, error) { + type auxManifestServer ManifestServer + + // Populate AllExecutables from Executables, if it exists. + if ms.Executables != nil { + if ms.AllExecutables == nil { + ms.AllExecutables = make(map[string]string) + } + + ms.AllExecutables["linux-amd64"] = ms.Executables.LinuxAmd64 + ms.AllExecutables["darwin-amd64"] = ms.Executables.DarwinAmd64 + ms.AllExecutables["windows-amd64"] = ms.Executables.WindowsAmd64 + } + + return yaml.Marshal((*auxManifestServer)(ms)) +} + +func (ms *ManifestServer) UnmarshalYAML(unmarshal func(interface{}) error) error { + type auxManifestServer ManifestServer + + aux := (*auxManifestServer)(ms) + if err := unmarshal(&aux); err != nil { + return err + } + + if len(aux.AllExecutables) > 0 { + ms.Executables = &ManifestExecutables{ + LinuxAmd64: aux.AllExecutables["linux-amd64"], + DarwinAmd64: aux.AllExecutables["darwin-amd64"], + WindowsAmd64: aux.AllExecutables["windows-amd64"], + } + } + + return nil +} + +// ManifestExecutables is a legacy structure capturing a subet of the known platform executables. 
type ManifestExecutables struct { // LinuxAmd64 is the path to your executable binary for the corresponding platform LinuxAmd64 string `json:"linux-amd64,omitempty" yaml:"linux-amd64,omitempty"` @@ -247,14 +372,9 @@ func (m *Manifest) GetExecutableForRuntime(goOs, goArch string) string { } var executable string - if server.Executables != nil { - if goOs == "linux" && goArch == "amd64" { - executable = server.Executables.LinuxAmd64 - } else if goOs == "darwin" && goArch == "amd64" { - executable = server.Executables.DarwinAmd64 - } else if goOs == "windows" && goArch == "amd64" { - executable = server.Executables.WindowsAmd64 - } + if len(server.AllExecutables) > 0 { + osArch := fmt.Sprintf("%s-%s", goOs, goArch) + executable = server.AllExecutables[osArch] } if executable == "" { @@ -284,6 +404,121 @@ func (m *Manifest) MeetMinServerVersion(serverVersion string) (bool, error) { return true, nil } +func (m *Manifest) IsValid() error { + if !IsValidPluginId(m.Id) { + return errors.New("invalid plugin ID") + } + + if strings.TrimSpace(m.Name) == "" { + return errors.New("a plugin name is needed") + } + + if m.HomepageURL != "" && !IsValidHttpUrl(m.HomepageURL) { + return errors.New("invalid HomepageURL") + } + + if m.SupportURL != "" && !IsValidHttpUrl(m.SupportURL) { + return errors.New("invalid SupportURL") + } + + if m.ReleaseNotesURL != "" && !IsValidHttpUrl(m.ReleaseNotesURL) { + return errors.New("invalid ReleaseNotesURL") + } + + if m.Version != "" { + _, err := semver.Parse(m.Version) + if err != nil { + return errors.Wrap(err, "failed to parse Version") + } + } + + if m.MinServerVersion != "" { + _, err := semver.Parse(m.MinServerVersion) + if err != nil { + return errors.Wrap(err, "failed to parse MinServerVersion") + } + } + + if m.SettingsSchema != nil { + err := m.SettingsSchema.isValid() + if err != nil { + return errors.Wrap(err, "invalid settings schema") + } + } + + return nil +} + +func (s *PluginSettingsSchema) isValid() error { + for _, setting := range s.Settings { + err := setting.isValid() + if err != nil { + return err + } + } + + return nil +} + +func (s *PluginSetting) isValid() error { + pluginSettingType, err := convertTypeToPluginSettingType(s.Type) + if err != nil { + return err + } + + if s.RegenerateHelpText != "" && pluginSettingType != Generated { + return errors.New("should not set RegenerateHelpText for setting type that is not generated") + } + + if s.Placeholder != "" && !(pluginSettingType == Generated || + pluginSettingType == Text || + pluginSettingType == LongText || + pluginSettingType == Number || + pluginSettingType == Username) { + return errors.New("should not set Placeholder for setting type not in text, generated or username") + } + + if s.Options != nil { + if pluginSettingType != Radio && pluginSettingType != Dropdown { + return errors.New("should not set Options for setting type not in radio or dropdown") + } + + for _, option := range s.Options { + if option.DisplayName == "" || option.Value == "" { + return errors.New("should not have empty Displayname or Value for any option") + } + } + } + + return nil +} + +func convertTypeToPluginSettingType(t string) (PluginSettingType, error) { + var settingType PluginSettingType + switch t { + case "bool": + return Bool, nil + case "dropdown": + return Dropdown, nil + case "generated": + return Generated, nil + case "radio": + return Radio, nil + case "text": + return Text, nil + case "number": + return Number, nil + case "longtext": + return LongText, nil + case "username": + return Username, nil + 
case "custom": + return Custom, nil + default: + return settingType, errors.New("invalid setting type: " + t) + } +} + // FindManifest will find and parse the manifest in a given directory. // // In all cases other than a does-not-exist error, path is set to the path of the manifest file that was diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go b/vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go new file mode 100644 index 0000000..cad93df --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/marketplace_plugin.go @@ -0,0 +1,136 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "io" + "net/url" + "strconv" + + "github.com/pkg/errors" +) + +// BaseMarketplacePlugin is a Mattermost plugin received from the Marketplace server. +type BaseMarketplacePlugin struct { + HomepageURL string `json:"homepage_url"` + IconData string `json:"icon_data"` + DownloadURL string `json:"download_url"` + ReleaseNotesURL string `json:"release_notes_url"` + Labels []MarketplaceLabel `json:"labels,omitempty"` + Hosting string `json:"hosting"` // Indicated if the plugin is limited to a certain hosting type + AuthorType string `json:"author_type"` // The maintainer of the plugin + ReleaseStage string `json:"release_stage"` // The stage in the software release cycle that the plugin is in + Enterprise bool `json:"enterprise"` // Indicated if the plugin is an enterprise plugin + Signature string `json:"signature"` // Signature represents a signature of a plugin saved in base64 encoding. + Manifest *Manifest `json:"manifest"` +} + +// MarketplaceLabel represents a label shown in the Marketplace UI. +type MarketplaceLabel struct { + Name string `json:"name"` + Description string `json:"description"` + URL string `json:"url"` + Color string `json:"color"` +} + +// MarketplacePlugin is a state aware Marketplace plugin. +type MarketplacePlugin struct { + *BaseMarketplacePlugin + InstalledVersion string `json:"installed_version"` +} + +// BaseMarketplacePluginsFromReader decodes a json-encoded list of plugins from the given io.Reader. +func BaseMarketplacePluginsFromReader(reader io.Reader) ([]*BaseMarketplacePlugin, error) { + plugins := []*BaseMarketplacePlugin{} + decoder := json.NewDecoder(reader) + + if err := decoder.Decode(&plugins); err != nil && err != io.EOF { + return nil, err + } + + return plugins, nil +} + +// MarketplacePluginsFromReader decodes a json-encoded list of plugins from the given io.Reader. +func MarketplacePluginsFromReader(reader io.Reader) ([]*MarketplacePlugin, error) { + plugins := []*MarketplacePlugin{} + decoder := json.NewDecoder(reader) + + if err := decoder.Decode(&plugins); err != nil && err != io.EOF { + return nil, err + } + + return plugins, nil +} + +// DecodeSignature Decodes signature and returns ReadSeeker. +func (plugin *BaseMarketplacePlugin) DecodeSignature() (io.ReadSeeker, error) { + signatureBytes, err := base64.StdEncoding.DecodeString(plugin.Signature) + if err != nil { + return nil, errors.Wrap(err, "Unable to decode base64 signature.") + } + return bytes.NewReader(signatureBytes), nil +} + +// MarketplacePluginFilter describes the parameters to request a list of plugins. 
+type MarketplacePluginFilter struct { + Page int + PerPage int + Filter string + ServerVersion string + BuildEnterpriseReady bool + EnterprisePlugins bool + Cloud bool + LocalOnly bool + Platform string + PluginId string + ReturnAllVersions bool +} + +// ApplyToURL modifies the given url to include query string parameters for the request. +func (filter *MarketplacePluginFilter) ApplyToURL(u *url.URL) { + q := u.Query() + q.Add("page", strconv.Itoa(filter.Page)) + if filter.PerPage > 0 { + q.Add("per_page", strconv.Itoa(filter.PerPage)) + } + q.Add("filter", filter.Filter) + q.Add("server_version", filter.ServerVersion) + q.Add("build_enterprise_ready", strconv.FormatBool(filter.BuildEnterpriseReady)) + q.Add("enterprise_plugins", strconv.FormatBool(filter.EnterprisePlugins)) + q.Add("cloud", strconv.FormatBool(filter.Cloud)) + q.Add("local_only", strconv.FormatBool(filter.LocalOnly)) + q.Add("platform", filter.Platform) + q.Add("plugin_id", filter.PluginId) + q.Add("return_all_versions", strconv.FormatBool(filter.ReturnAllVersions)) + u.RawQuery = q.Encode() +} + +// InstallMarketplacePluginRequest struct describes parameters of the requested plugin. +type InstallMarketplacePluginRequest struct { + Id string `json:"id"` + Version string `json:"version"` +} + +// PluginRequestFromReader decodes a json-encoded plugin request from the given io.Reader. +func PluginRequestFromReader(reader io.Reader) (*InstallMarketplacePluginRequest, error) { + var r *InstallMarketplacePluginRequest + err := json.NewDecoder(reader).Decode(&r) + if err != nil { + return nil, err + } + return r, nil +} + +// ToJson method will return json from plugin request. +func (r *InstallMarketplacePluginRequest) ToJson() (string, error) { + b, err := json.Marshal(r) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/mention_map.go b/vendor/github.com/mattermost/mattermost-server/v5/model/mention_map.go new file mode 100644 index 0000000..2f3444d --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/mention_map.go @@ -0,0 +1,80 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "fmt" + "net/url" +) + +type UserMentionMap map[string]string +type ChannelMentionMap map[string]string + +const ( + userMentionsKey = "user_mentions" + userMentionsIdsKey = "user_mentions_ids" + channelMentionsKey = "channel_mentions" + channelMentionsIdsKey = "channel_mentions_ids" +) + +func UserMentionMapFromURLValues(values url.Values) (UserMentionMap, error) { + return mentionsFromURLValues(values, userMentionsKey, userMentionsIdsKey) +} + +func (m UserMentionMap) ToURLValues() url.Values { + return mentionsToURLValues(m, userMentionsKey, userMentionsIdsKey) +} + +func ChannelMentionMapFromURLValues(values url.Values) (ChannelMentionMap, error) { + return mentionsFromURLValues(values, channelMentionsKey, channelMentionsIdsKey) +} + +func (m ChannelMentionMap) ToURLValues() url.Values { + return mentionsToURLValues(m, channelMentionsKey, channelMentionsIdsKey) +} + +func mentionsFromURLValues(values url.Values, mentionKey, idKey string) (map[string]string, error) { + mentions, mentionsOk := values[mentionKey] + ids, idsOk := values[idKey] + + if !mentionsOk && !idsOk { + return map[string]string{}, nil + } + + if !mentionsOk { + return nil, fmt.Errorf("%s key not found", mentionKey) + } + + if !idsOk { + return nil, fmt.Errorf("%s key not found", idKey) + } + + if len(mentions) != len(ids) { + return nil, fmt.Errorf("keys %s and %s have different length", mentionKey, idKey) + } + + mentionsMap := make(map[string]string) + for i, mention := range mentions { + id := ids[i] + + if oldId, ok := mentionsMap[mention]; ok && oldId != id { + return nil, fmt.Errorf("key %s has two different values: %s and %s", mention, oldId, id) + } + + mentionsMap[mention] = id + } + + return mentionsMap, nil +} + +func mentionsToURLValues(mentions map[string]string, mentionKey, idKey string) url.Values { + values := url.Values{} + + for mention, id := range mentions { + values.Add(mentionKey, mention) + values.Add(idKey, id) + } + + return values +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/message_export.go b/vendor/github.com/mattermost/mattermost-server/v5/model/message_export.go similarity index 80% rename from vendor/github.com/mattermost/mattermost-server/model/message_export.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/message_export.go index 834c84e..88108e2 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/message_export.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/message_export.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -21,6 +21,7 @@ type MessageExport struct { PostId *string PostCreateAt *int64 PostUpdateAt *int64 + PostDeleteAt *int64 PostMessage *string PostType *string PostRootId *string diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go b/vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go new file mode 100644 index 0000000..b229356 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/mfa_secret.go @@ -0,0 +1,25 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +type MfaSecret struct { + Secret string `json:"secret"` + QRCode string `json:"qr_code"` +} + +func (mfa *MfaSecret) ToJson() string { + b, _ := json.Marshal(mfa) + return string(b) +} + +func MfaSecretFromJson(data io.Reader) *MfaSecret { + var mfa *MfaSecret + json.NewDecoder(data).Decode(&mfa) + return mfa +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go new file mode 100644 index 0000000..387d4d5 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go @@ -0,0 +1,29 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +const ( + ADVANCED_PERMISSIONS_MIGRATION_KEY = "AdvancedPermissionsMigrationComplete" + MIGRATION_KEY_ADVANCED_PERMISSIONS_PHASE_2 = "migration_advanced_permissions_phase_2" + + MIGRATION_KEY_EMOJI_PERMISSIONS_SPLIT = "emoji_permissions_split" + MIGRATION_KEY_WEBHOOK_PERMISSIONS_SPLIT = "webhook_permissions_split" + MIGRATION_KEY_LIST_JOIN_PUBLIC_PRIVATE_TEAMS = "list_join_public_private_teams" + MIGRATION_KEY_REMOVE_PERMANENT_DELETE_USER = "remove_permanent_delete_user" + MIGRATION_KEY_ADD_BOT_PERMISSIONS = "add_bot_permissions" + MIGRATION_KEY_APPLY_CHANNEL_MANAGE_DELETE_TO_CHANNEL_USER = "apply_channel_manage_delete_to_channel_user" + MIGRATION_KEY_REMOVE_CHANNEL_MANAGE_DELETE_FROM_TEAM_USER = "remove_channel_manage_delete_from_team_user" + MIGRATION_KEY_VIEW_MEMBERS_NEW_PERMISSION = "view_members_new_permission" + MIGRATION_KEY_ADD_MANAGE_GUESTS_PERMISSIONS = "add_manage_guests_permissions" + MIGRATION_KEY_CHANNEL_MODERATIONS_PERMISSIONS = "channel_moderations_permissions" + MIGRATION_KEY_ADD_USE_GROUP_MENTIONS_PERMISSION = "add_use_group_mentions_permission" + MIGRATION_KEY_ADD_SYSTEM_CONSOLE_PERMISSIONS = "add_system_console_permissions" + MIGRATION_KEY_SIDEBAR_CATEGORIES_PHASE_2 = "migration_sidebar_categories_phase_2" + MIGRATION_KEY_ADD_CONVERT_CHANNEL_PERMISSIONS = "add_convert_channel_permissions" + MIGRATION_KEY_ADD_SYSTEM_ROLES_PERMISSIONS = "add_system_roles_permissions" + MIGRATION_KEY_ADD_BILLING_PERMISSIONS = "add_billing_permissions" + MIGRATION_KEY_ADD_MANAGE_SHARED_CHANNEL_PERMISSIONS = "manage_shared_channel_permissions" + MIGRATION_KEY_ADD_MANAGE_REMOTE_CLUSTERS_PERMISSIONS = "manage_remote_clusters_permissions" + MIGRATION_KEY_ADD_DOWNLOAD_COMPLIANCE_EXPORT_RESULTS = "download_compliance_export_results" +) diff --git a/vendor/github.com/mattermost/mattermost-server/model/oauth.go b/vendor/github.com/mattermost/mattermost-server/v5/model/oauth.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/model/oauth.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/oauth.go index 49f018f..4a345a6 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/oauth.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/oauth.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -37,7 +37,7 @@ type OAuthApp struct { // correctly. 
func (a *OAuthApp) IsValid() *AppError { - if len(a.Id) != 26 { + if !IsValidId(a.Id) { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.app_id.app_error", nil, "", http.StatusBadRequest) } @@ -49,7 +49,7 @@ func (a *OAuthApp) IsValid() *AppError { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.update_at.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } - if len(a.CreatorId) != 26 { + if !IsValidId(a.CreatorId) { return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.creator_id.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go b/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go index 6da3c6e..d637935 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -112,12 +112,15 @@ func (o *OutgoingWebhookResponse) ToJson() string { func OutgoingWebhookResponseFromJson(data io.Reader) (*OutgoingWebhookResponse, error) { var o *OutgoingWebhookResponse err := json.NewDecoder(data).Decode(&o) + if err == io.EOF { + return nil, nil + } return o, err } func (o *OutgoingWebhook) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -133,15 +136,15 @@ func (o *OutgoingWebhook) IsValid() *AppError { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.CreatorId) != 26 { + if !IsValidId(o.CreatorId) { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ChannelId) != 0 && len(o.ChannelId) != 26 { + if o.ChannelId != "" && !IsValidId(o.ChannelId) { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.TeamId) != 26 { + if !IsValidId(o.TeamId) { return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/permission.go b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go similarity index 50% rename from vendor/github.com/mattermost/mattermost-server/model/permission.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/permission.go index 09a506c..e7b8508 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/permission.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go @@ -1,12 +1,12 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model const ( - PERMISSION_SCOPE_SYSTEM = "system_scope" - PERMISSION_SCOPE_TEAM = "team_scope" - PERMISSION_SCOPE_CHANNEL = "channel_scope" + PermissionScopeSystem = "system_scope" + PermissionScopeTeam = "team_scope" + PermissionScopeChannel = "channel_scope" ) type Permission struct { @@ -25,6 +25,8 @@ var PERMISSION_CREATE_PUBLIC_CHANNEL *Permission var PERMISSION_CREATE_PRIVATE_CHANNEL *Permission var PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS *Permission var PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS *Permission +var PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE *Permission +var PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC *Permission var PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE *Permission var PERMISSION_MANAGE_ROLES *Permission var PERMISSION_MANAGE_TEAM_ROLES *Permission @@ -43,6 +45,8 @@ var PERMISSION_DELETE_PUBLIC_CHANNEL *Permission var PERMISSION_DELETE_PRIVATE_CHANNEL *Permission var PERMISSION_EDIT_OTHER_USERS *Permission var PERMISSION_READ_CHANNEL *Permission +var PERMISSION_READ_PUBLIC_CHANNEL_GROUPS *Permission +var PERMISSION_READ_PRIVATE_CHANNEL_GROUPS *Permission var PERMISSION_READ_PUBLIC_CHANNEL *Permission var PERMISSION_ADD_REACTION *Permission var PERMISSION_REMOVE_REACTION *Permission @@ -76,6 +80,7 @@ var PERMISSION_MANAGE_TEAM *Permission var PERMISSION_IMPORT_TEAM *Permission var PERMISSION_VIEW_TEAM *Permission var PERMISSION_LIST_USERS_WITHOUT_TEAM *Permission +var PERMISSION_READ_JOBS *Permission var PERMISSION_MANAGE_JOBS *Permission var PERMISSION_CREATE_USER_ACCESS_TOKEN *Permission var PERMISSION_READ_USER_ACCESS_TOKEN *Permission @@ -87,501 +92,932 @@ var PERMISSION_READ_OTHERS_BOTS *Permission var PERMISSION_MANAGE_BOTS *Permission var PERMISSION_MANAGE_OTHERS_BOTS *Permission var PERMISSION_VIEW_MEMBERS *Permission +var PERMISSION_INVITE_GUEST *Permission +var PERMISSION_PROMOTE_GUEST *Permission +var PERMISSION_DEMOTE_TO_GUEST *Permission +var PERMISSION_USE_CHANNEL_MENTIONS *Permission +var PERMISSION_USE_GROUP_MENTIONS *Permission +var PERMISSION_READ_OTHER_USERS_TEAMS *Permission +var PERMISSION_EDIT_BRAND *Permission +var PERMISSION_MANAGE_SHARED_CHANNELS *Permission +var PERMISSION_MANAGE_REMOTE_CLUSTERS *Permission +var PERMISSION_DOWNLOAD_COMPLIANCE_EXPORT_RESULT *Permission + +var PERMISSION_SYSCONSOLE_READ_ABOUT *Permission +var PERMISSION_SYSCONSOLE_WRITE_ABOUT *Permission + +var PERMISSION_SYSCONSOLE_READ_BILLING *Permission +var PERMISSION_SYSCONSOLE_WRITE_BILLING *Permission + +var PERMISSION_SYSCONSOLE_READ_REPORTING *Permission +var PERMISSION_SYSCONSOLE_WRITE_REPORTING *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS *Permission + +var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_SYSTEM_ROLES *Permission +var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_SYSTEM_ROLES *Permission + +var PERMISSION_SYSCONSOLE_READ_ENVIRONMENT *Permission +var PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT *Permission + +var 
PERMISSION_SYSCONSOLE_READ_SITE *Permission +var PERMISSION_SYSCONSOLE_WRITE_SITE *Permission + +var PERMISSION_SYSCONSOLE_READ_AUTHENTICATION *Permission +var PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION *Permission + +var PERMISSION_SYSCONSOLE_READ_PLUGINS *Permission +var PERMISSION_SYSCONSOLE_WRITE_PLUGINS *Permission + +var PERMISSION_SYSCONSOLE_READ_INTEGRATIONS *Permission +var PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS *Permission + +var PERMISSION_SYSCONSOLE_READ_COMPLIANCE *Permission +var PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE *Permission + +var PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL *Permission +var PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL *Permission // General permission that encompasses all system admin functions // in the future this could be broken up to allow access to some // admin functions but not others var PERMISSION_MANAGE_SYSTEM *Permission -var ALL_PERMISSIONS []*Permission +var AllPermissions []*Permission +var DeprecatedPermissions []*Permission + +var ChannelModeratedPermissions []string +var ChannelModeratedPermissionsMap map[string]string + +var SysconsoleReadPermissions []*Permission +var SysconsoleWritePermissions []*Permission func initializePermissions() { PERMISSION_INVITE_USER = &Permission{ "invite_user", "authentication.permissions.team_invite_user.name", "authentication.permissions.team_invite_user.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_ADD_USER_TO_TEAM = &Permission{ "add_user_to_team", "authentication.permissions.add_user_to_team.name", "authentication.permissions.add_user_to_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_USE_SLASH_COMMANDS = &Permission{ "use_slash_commands", "authentication.permissions.team_use_slash_commands.name", "authentication.permissions.team_use_slash_commands.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_SLASH_COMMANDS = &Permission{ "manage_slash_commands", "authentication.permissions.manage_slash_commands.name", "authentication.permissions.manage_slash_commands.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS = &Permission{ "manage_others_slash_commands", "authentication.permissions.manage_others_slash_commands.name", "authentication.permissions.manage_others_slash_commands.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_PUBLIC_CHANNEL = &Permission{ "create_public_channel", "authentication.permissions.create_public_channel.name", "authentication.permissions.create_public_channel.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_PRIVATE_CHANNEL = &Permission{ "create_private_channel", "authentication.permissions.create_private_channel.name", "authentication.permissions.create_private_channel.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS = &Permission{ "manage_public_channel_members", "authentication.permissions.manage_public_channel_members.name", "authentication.permissions.manage_public_channel_members.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS = &Permission{ "manage_private_channel_members", "authentication.permissions.manage_private_channel_members.name", "authentication.permissions.manage_private_channel_members.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, + } + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE = &Permission{ + 
"convert_public_channel_to_private", + "authentication.permissions.convert_public_channel_to_private.name", + "authentication.permissions.convert_public_channel_to_private.description", + PermissionScopeChannel, + } + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC = &Permission{ + "convert_private_channel_to_public", + "authentication.permissions.convert_private_channel_to_public.name", + "authentication.permissions.convert_private_channel_to_public.description", + PermissionScopeChannel, } PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE = &Permission{ "assign_system_admin_role", "authentication.permissions.assign_system_admin_role.name", "authentication.permissions.assign_system_admin_role.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_ROLES = &Permission{ "manage_roles", "authentication.permissions.manage_roles.name", "authentication.permissions.manage_roles.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_TEAM_ROLES = &Permission{ "manage_team_roles", "authentication.permissions.manage_team_roles.name", "authentication.permissions.manage_team_roles.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_CHANNEL_ROLES = &Permission{ "manage_channel_roles", "authentication.permissions.manage_channel_roles.name", "authentication.permissions.manage_channel_roles.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_SYSTEM = &Permission{ "manage_system", "authentication.permissions.manage_system.name", "authentication.permissions.manage_system.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_DIRECT_CHANNEL = &Permission{ "create_direct_channel", "authentication.permissions.create_direct_channel.name", "authentication.permissions.create_direct_channel.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_GROUP_CHANNEL = &Permission{ "create_group_channel", "authentication.permissions.create_group_channel.name", "authentication.permissions.create_group_channel.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES = &Permission{ "manage_public_channel_properties", "authentication.permissions.manage_public_channel_properties.name", "authentication.permissions.manage_public_channel_properties.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES = &Permission{ "manage_private_channel_properties", "authentication.permissions.manage_private_channel_properties.name", "authentication.permissions.manage_private_channel_properties.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_LIST_PUBLIC_TEAMS = &Permission{ "list_public_teams", "authentication.permissions.list_public_teams.name", "authentication.permissions.list_public_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_JOIN_PUBLIC_TEAMS = &Permission{ "join_public_teams", "authentication.permissions.join_public_teams.name", "authentication.permissions.join_public_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_LIST_PRIVATE_TEAMS = &Permission{ "list_private_teams", "authentication.permissions.list_private_teams.name", "authentication.permissions.list_private_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_JOIN_PRIVATE_TEAMS = &Permission{ "join_private_teams", 
"authentication.permissions.join_private_teams.name", "authentication.permissions.join_private_teams.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_LIST_TEAM_CHANNELS = &Permission{ "list_team_channels", "authentication.permissions.list_team_channels.name", "authentication.permissions.list_team_channels.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_JOIN_PUBLIC_CHANNELS = &Permission{ "join_public_channels", "authentication.permissions.join_public_channels.name", "authentication.permissions.join_public_channels.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_DELETE_PUBLIC_CHANNEL = &Permission{ "delete_public_channel", "authentication.permissions.delete_public_channel.name", "authentication.permissions.delete_public_channel.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_DELETE_PRIVATE_CHANNEL = &Permission{ "delete_private_channel", "authentication.permissions.delete_private_channel.name", "authentication.permissions.delete_private_channel.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_EDIT_OTHER_USERS = &Permission{ "edit_other_users", "authentication.permissions.edit_other_users.name", "authentication.permissions.edit_other_users.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_CHANNEL = &Permission{ "read_channel", "authentication.permissions.read_channel.name", "authentication.permissions.read_channel.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, + } + PERMISSION_READ_PUBLIC_CHANNEL_GROUPS = &Permission{ + "read_public_channel_groups", + "authentication.permissions.read_public_channel_groups.name", + "authentication.permissions.read_public_channel_groups.description", + PermissionScopeChannel, + } + PERMISSION_READ_PRIVATE_CHANNEL_GROUPS = &Permission{ + "read_private_channel_groups", + "authentication.permissions.read_private_channel_groups.name", + "authentication.permissions.read_private_channel_groups.description", + PermissionScopeChannel, } PERMISSION_READ_PUBLIC_CHANNEL = &Permission{ "read_public_channel", "authentication.permissions.read_public_channel.name", "authentication.permissions.read_public_channel.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_ADD_REACTION = &Permission{ "add_reaction", "authentication.permissions.add_reaction.name", "authentication.permissions.add_reaction.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_REMOVE_REACTION = &Permission{ "remove_reaction", "authentication.permissions.remove_reaction.name", "authentication.permissions.remove_reaction.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_REMOVE_OTHERS_REACTIONS = &Permission{ "remove_others_reactions", "authentication.permissions.remove_others_reactions.name", "authentication.permissions.remove_others_reactions.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } // DEPRECATED PERMISSION_PERMANENT_DELETE_USER = &Permission{ "permanent_delete_user", "authentication.permissions.permanent_delete_user.name", "authentication.permissions.permanent_delete_user.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_UPLOAD_FILE = &Permission{ "upload_file", "authentication.permissions.upload_file.name", "authentication.permissions.upload_file.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_GET_PUBLIC_LINK = &Permission{ 
"get_public_link", "authentication.permissions.get_public_link.name", "authentication.permissions.get_public_link.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } // DEPRECATED PERMISSION_MANAGE_WEBHOOKS = &Permission{ "manage_webhooks", "authentication.permissions.manage_webhooks.name", "authentication.permissions.manage_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } // DEPRECATED PERMISSION_MANAGE_OTHERS_WEBHOOKS = &Permission{ "manage_others_webhooks", "authentication.permissions.manage_others_webhooks.name", "authentication.permissions.manage_others_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_INCOMING_WEBHOOKS = &Permission{ "manage_incoming_webhooks", "authentication.permissions.manage_incoming_webhooks.name", "authentication.permissions.manage_incoming_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OUTGOING_WEBHOOKS = &Permission{ "manage_outgoing_webhooks", "authentication.permissions.manage_outgoing_webhooks.name", "authentication.permissions.manage_outgoing_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS = &Permission{ "manage_others_incoming_webhooks", "authentication.permissions.manage_others_incoming_webhooks.name", "authentication.permissions.manage_others_incoming_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS = &Permission{ "manage_others_outgoing_webhooks", "authentication.permissions.manage_others_outgoing_webhooks.name", "authentication.permissions.manage_others_outgoing_webhooks.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_MANAGE_OAUTH = &Permission{ "manage_oauth", "authentication.permissions.manage_oauth.name", "authentication.permissions.manage_oauth.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH = &Permission{ "manage_system_wide_oauth", "authentication.permissions.manage_system_wide_oauth.name", "authentication.permissions.manage_system_wide_oauth.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } // DEPRECATED PERMISSION_MANAGE_EMOJIS = &Permission{ "manage_emojis", "authentication.permissions.manage_emojis.name", "authentication.permissions.manage_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } // DEPRECATED PERMISSION_MANAGE_OTHERS_EMOJIS = &Permission{ "manage_others_emojis", "authentication.permissions.manage_others_emojis.name", "authentication.permissions.manage_others_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_EMOJIS = &Permission{ "create_emojis", "authentication.permissions.create_emojis.name", "authentication.permissions.create_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_DELETE_EMOJIS = &Permission{ "delete_emojis", "authentication.permissions.delete_emojis.name", "authentication.permissions.delete_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_DELETE_OTHERS_EMOJIS = &Permission{ "delete_others_emojis", "authentication.permissions.delete_others_emojis.name", "authentication.permissions.delete_others_emojis.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_POST = &Permission{ "create_post", "authentication.permissions.create_post.name", "authentication.permissions.create_post.description", - 
PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_CREATE_POST_PUBLIC = &Permission{ "create_post_public", "authentication.permissions.create_post_public.name", "authentication.permissions.create_post_public.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_CREATE_POST_EPHEMERAL = &Permission{ "create_post_ephemeral", "authentication.permissions.create_post_ephemeral.name", "authentication.permissions.create_post_ephemeral.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_EDIT_POST = &Permission{ "edit_post", "authentication.permissions.edit_post.name", "authentication.permissions.edit_post.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_EDIT_OTHERS_POSTS = &Permission{ "edit_others_posts", "authentication.permissions.edit_others_posts.name", "authentication.permissions.edit_others_posts.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_DELETE_POST = &Permission{ "delete_post", "authentication.permissions.delete_post.name", "authentication.permissions.delete_post.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, } PERMISSION_DELETE_OTHERS_POSTS = &Permission{ "delete_others_posts", "authentication.permissions.delete_others_posts.name", "authentication.permissions.delete_others_posts.description", - PERMISSION_SCOPE_CHANNEL, + PermissionScopeChannel, + } + PERMISSION_MANAGE_SHARED_CHANNELS = &Permission{ + "manage_shared_channels", + "authentication.permissions.manage_shared_channels.name", + "authentication.permissions.manage_shared_channels.description", + PermissionScopeSystem, + } + PERMISSION_MANAGE_REMOTE_CLUSTERS = &Permission{ + "manage_remote_clusters", + "authentication.permissions.manage_remote_clusters.name", + "authentication.permissions.manage_remote_clusters.description", + PermissionScopeSystem, + } + + PERMISSION_DOWNLOAD_COMPLIANCE_EXPORT_RESULT = &Permission{ + "download_compliance_export_result", + "authentication.permissions.download_compliance_export_result.name", + "authentication.permissions.download_compliance_export_result.description", + PermissionScopeSystem, } + PERMISSION_REMOVE_USER_FROM_TEAM = &Permission{ "remove_user_from_team", "authentication.permissions.remove_user_from_team.name", "authentication.permissions.remove_user_from_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_CREATE_TEAM = &Permission{ "create_team", "authentication.permissions.create_team.name", "authentication.permissions.create_team.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_TEAM = &Permission{ "manage_team", "authentication.permissions.manage_team.name", "authentication.permissions.manage_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_IMPORT_TEAM = &Permission{ "import_team", "authentication.permissions.import_team.name", "authentication.permissions.import_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_VIEW_TEAM = &Permission{ "view_team", "authentication.permissions.view_team.name", "authentication.permissions.view_team.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, } PERMISSION_LIST_USERS_WITHOUT_TEAM = &Permission{ "list_users_without_team", "authentication.permissions.list_users_without_team.name", "authentication.permissions.list_users_without_team.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_USER_ACCESS_TOKEN = &Permission{ 
"create_user_access_token", "authentication.permissions.create_user_access_token.name", "authentication.permissions.create_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_USER_ACCESS_TOKEN = &Permission{ "read_user_access_token", "authentication.permissions.read_user_access_token.name", "authentication.permissions.read_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_REVOKE_USER_ACCESS_TOKEN = &Permission{ "revoke_user_access_token", "authentication.permissions.revoke_user_access_token.name", "authentication.permissions.revoke_user_access_token.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_CREATE_BOT = &Permission{ "create_bot", "authentication.permissions.create_bot.name", "authentication.permissions.create_bot.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_ASSIGN_BOT = &Permission{ "assign_bot", "authentication.permissions.assign_bot.name", "authentication.permissions.assign_bot.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_BOTS = &Permission{ "read_bots", "authentication.permissions.read_bots.name", "authentication.permissions.read_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_READ_OTHERS_BOTS = &Permission{ "read_others_bots", "authentication.permissions.read_others_bots.name", "authentication.permissions.read_others_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_BOTS = &Permission{ "manage_bots", "authentication.permissions.manage_bots.name", "authentication.permissions.manage_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_MANAGE_OTHERS_BOTS = &Permission{ "manage_others_bots", "authentication.permissions.manage_others_bots.name", "authentication.permissions.manage_others_bots.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, + } + PERMISSION_READ_JOBS = &Permission{ + "read_jobs", + "authentication.permisssions.read_jobs.name", + "authentication.permisssions.read_jobs.description", + PermissionScopeSystem, } PERMISSION_MANAGE_JOBS = &Permission{ "manage_jobs", "authentication.permisssions.manage_jobs.name", "authentication.permisssions.manage_jobs.description", - PERMISSION_SCOPE_SYSTEM, + PermissionScopeSystem, } PERMISSION_VIEW_MEMBERS = &Permission{ "view_members", "authentication.permisssions.view_members.name", "authentication.permisssions.view_members.description", - PERMISSION_SCOPE_TEAM, + PermissionScopeTeam, + } + PERMISSION_INVITE_GUEST = &Permission{ + "invite_guest", + "authentication.permissions.invite_guest.name", + "authentication.permissions.invite_guest.description", + PermissionScopeTeam, + } + PERMISSION_PROMOTE_GUEST = &Permission{ + "promote_guest", + "authentication.permissions.promote_guest.name", + "authentication.permissions.promote_guest.description", + PermissionScopeSystem, + } + PERMISSION_DEMOTE_TO_GUEST = &Permission{ + "demote_to_guest", + "authentication.permissions.demote_to_guest.name", + "authentication.permissions.demote_to_guest.description", + PermissionScopeSystem, + } + PERMISSION_USE_CHANNEL_MENTIONS = &Permission{ + "use_channel_mentions", + "authentication.permissions.use_channel_mentions.name", + "authentication.permissions.use_channel_mentions.description", + PermissionScopeChannel, + } + PERMISSION_USE_GROUP_MENTIONS = &Permission{ + "use_group_mentions", + 
"authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeChannel, + } + PERMISSION_READ_OTHER_USERS_TEAMS = &Permission{ + "read_other_users_teams", + "authentication.permissions.read_other_users_teams.name", + "authentication.permissions.read_other_users_teams.description", + PermissionScopeSystem, + } + PERMISSION_EDIT_BRAND = &Permission{ + "edit_brand", + "authentication.permissions.edit_brand.name", + "authentication.permissions.edit_brand.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_ABOUT = &Permission{ + "sysconsole_read_about", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_ABOUT = &Permission{ + "sysconsole_write_about", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_BILLING = &Permission{ + "sysconsole_read_billing", + "", + "", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_BILLING = &Permission{ + "sysconsole_write_billing", + "", + "", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_REPORTING = &Permission{ + "sysconsole_read_reporting", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_REPORTING = &Permission{ + "sysconsole_write_reporting", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS = &Permission{ + "sysconsole_read_user_management_users", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS = &Permission{ + "sysconsole_write_user_management_users", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS = &Permission{ + "sysconsole_read_user_management_groups", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS = &Permission{ + "sysconsole_write_user_management_groups", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS = &Permission{ + "sysconsole_read_user_management_teams", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS = &Permission{ + "sysconsole_write_user_management_teams", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS = &Permission{ + "sysconsole_read_user_management_channels", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + 
PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS = &Permission{ + "sysconsole_write_user_management_channels", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS = &Permission{ + "sysconsole_read_user_management_permissions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS = &Permission{ + "sysconsole_write_user_management_permissions", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_SYSTEM_ROLES = &Permission{ + "sysconsole_read_user_management_system_roles", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_SYSTEM_ROLES = &Permission{ + "sysconsole_write_user_management_system_roles", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT = &Permission{ + "sysconsole_read_environment", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT = &Permission{ + "sysconsole_write_environment", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_SITE = &Permission{ + "sysconsole_read_site", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_SITE = &Permission{ + "sysconsole_write_site", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION = &Permission{ + "sysconsole_read_authentication", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION = &Permission{ + "sysconsole_write_authentication", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_PLUGINS = &Permission{ + "sysconsole_read_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_PLUGINS = &Permission{ + "sysconsole_write_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_INTEGRATIONS = &Permission{ + "sysconsole_read_integrations", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS = &Permission{ + "sysconsole_write_integrations", + 
"authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_COMPLIANCE = &Permission{ + "sysconsole_read_compliance", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE = &Permission{ + "sysconsole_write_compliance", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_PLUGINS = &Permission{ + "sysconsole_read_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_PLUGINS = &Permission{ + "sysconsole_write_plugins", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL = &Permission{ + "sysconsole_read_experimental", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, + } + PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL = &Permission{ + "sysconsole_write_experimental", + "authentication.permissions.use_group_mentions.name", + "authentication.permissions.use_group_mentions.description", + PermissionScopeSystem, } - ALL_PERMISSIONS = []*Permission{ - PERMISSION_INVITE_USER, - PERMISSION_ADD_USER_TO_TEAM, - PERMISSION_USE_SLASH_COMMANDS, - PERMISSION_MANAGE_SLASH_COMMANDS, - PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS, - PERMISSION_CREATE_PUBLIC_CHANNEL, - PERMISSION_CREATE_PRIVATE_CHANNEL, - PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, - PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, + SysconsoleReadPermissions = []*Permission{ + PERMISSION_SYSCONSOLE_READ_ABOUT, + PERMISSION_SYSCONSOLE_READ_BILLING, + PERMISSION_SYSCONSOLE_READ_REPORTING, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_SYSTEM_ROLES, + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT, + PERMISSION_SYSCONSOLE_READ_SITE, + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION, + PERMISSION_SYSCONSOLE_READ_PLUGINS, + PERMISSION_SYSCONSOLE_READ_INTEGRATIONS, + PERMISSION_SYSCONSOLE_READ_COMPLIANCE, + PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL, + } + + SysconsoleWritePermissions = []*Permission{ + PERMISSION_SYSCONSOLE_WRITE_ABOUT, + PERMISSION_SYSCONSOLE_WRITE_BILLING, + PERMISSION_SYSCONSOLE_WRITE_REPORTING, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_SYSTEM_ROLES, + PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT, + PERMISSION_SYSCONSOLE_WRITE_SITE, + PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION, + PERMISSION_SYSCONSOLE_WRITE_PLUGINS, + PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS, + PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE, + PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL, + } + + SystemScopedPermissionsMinusSysconsole := []*Permission{ 
PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE, PERMISSION_MANAGE_ROLES, - PERMISSION_MANAGE_TEAM_ROLES, - PERMISSION_MANAGE_CHANNEL_ROLES, + PERMISSION_MANAGE_SYSTEM, PERMISSION_CREATE_DIRECT_CHANNEL, PERMISSION_CREATE_GROUP_CHANNEL, - PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES, - PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES, PERMISSION_LIST_PUBLIC_TEAMS, PERMISSION_JOIN_PUBLIC_TEAMS, PERMISSION_LIST_PRIVATE_TEAMS, PERMISSION_JOIN_PRIVATE_TEAMS, + PERMISSION_EDIT_OTHER_USERS, + PERMISSION_READ_OTHER_USERS_TEAMS, + PERMISSION_GET_PUBLIC_LINK, + PERMISSION_MANAGE_OAUTH, + PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH, + PERMISSION_CREATE_TEAM, + PERMISSION_LIST_USERS_WITHOUT_TEAM, + PERMISSION_CREATE_USER_ACCESS_TOKEN, + PERMISSION_READ_USER_ACCESS_TOKEN, + PERMISSION_REVOKE_USER_ACCESS_TOKEN, + PERMISSION_CREATE_BOT, + PERMISSION_ASSIGN_BOT, + PERMISSION_READ_BOTS, + PERMISSION_READ_OTHERS_BOTS, + PERMISSION_MANAGE_BOTS, + PERMISSION_MANAGE_OTHERS_BOTS, + PERMISSION_READ_JOBS, + PERMISSION_MANAGE_JOBS, + PERMISSION_PROMOTE_GUEST, + PERMISSION_DEMOTE_TO_GUEST, + PERMISSION_EDIT_BRAND, + PERMISSION_MANAGE_SHARED_CHANNELS, + PERMISSION_MANAGE_REMOTE_CLUSTERS, + PERMISSION_DOWNLOAD_COMPLIANCE_EXPORT_RESULT, + } + + TeamScopedPermissions := []*Permission{ + PERMISSION_INVITE_USER, + PERMISSION_ADD_USER_TO_TEAM, + PERMISSION_MANAGE_SLASH_COMMANDS, + PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS, + PERMISSION_CREATE_PUBLIC_CHANNEL, + PERMISSION_CREATE_PRIVATE_CHANNEL, + PERMISSION_MANAGE_TEAM_ROLES, PERMISSION_LIST_TEAM_CHANNELS, PERMISSION_JOIN_PUBLIC_CHANNELS, - PERMISSION_DELETE_PUBLIC_CHANNEL, - PERMISSION_DELETE_PRIVATE_CHANNEL, - PERMISSION_EDIT_OTHER_USERS, - PERMISSION_READ_CHANNEL, PERMISSION_READ_PUBLIC_CHANNEL, - PERMISSION_ADD_REACTION, - PERMISSION_REMOVE_REACTION, - PERMISSION_REMOVE_OTHERS_REACTIONS, - PERMISSION_PERMANENT_DELETE_USER, - PERMISSION_UPLOAD_FILE, - PERMISSION_GET_PUBLIC_LINK, - PERMISSION_MANAGE_WEBHOOKS, - PERMISSION_MANAGE_OTHERS_WEBHOOKS, PERMISSION_MANAGE_INCOMING_WEBHOOKS, PERMISSION_MANAGE_OUTGOING_WEBHOOKS, PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS, PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS, - PERMISSION_MANAGE_OAUTH, - PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH, - PERMISSION_MANAGE_EMOJIS, - PERMISSION_MANAGE_OTHERS_EMOJIS, PERMISSION_CREATE_EMOJIS, PERMISSION_DELETE_EMOJIS, PERMISSION_DELETE_OTHERS_EMOJIS, + PERMISSION_REMOVE_USER_FROM_TEAM, + PERMISSION_MANAGE_TEAM, + PERMISSION_IMPORT_TEAM, + PERMISSION_VIEW_TEAM, + PERMISSION_VIEW_MEMBERS, + PERMISSION_INVITE_GUEST, + } + + ChannelScopedPermissions := []*Permission{ + PERMISSION_USE_SLASH_COMMANDS, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, + PERMISSION_MANAGE_CHANNEL_ROLES, + PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES, + PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES, + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE, + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC, + PERMISSION_DELETE_PUBLIC_CHANNEL, + PERMISSION_DELETE_PRIVATE_CHANNEL, + PERMISSION_READ_CHANNEL, + PERMISSION_READ_PUBLIC_CHANNEL_GROUPS, + PERMISSION_READ_PRIVATE_CHANNEL_GROUPS, + PERMISSION_ADD_REACTION, + PERMISSION_REMOVE_REACTION, + PERMISSION_REMOVE_OTHERS_REACTIONS, + PERMISSION_UPLOAD_FILE, PERMISSION_CREATE_POST, PERMISSION_CREATE_POST_PUBLIC, PERMISSION_CREATE_POST_EPHEMERAL, @@ -589,23 +1025,39 @@ func initializePermissions() { PERMISSION_EDIT_OTHERS_POSTS, PERMISSION_DELETE_POST, PERMISSION_DELETE_OTHERS_POSTS, - PERMISSION_REMOVE_USER_FROM_TEAM, - PERMISSION_CREATE_TEAM, - PERMISSION_MANAGE_TEAM, - 
PERMISSION_IMPORT_TEAM, - PERMISSION_VIEW_TEAM, - PERMISSION_LIST_USERS_WITHOUT_TEAM, - PERMISSION_MANAGE_JOBS, - PERMISSION_CREATE_USER_ACCESS_TOKEN, - PERMISSION_READ_USER_ACCESS_TOKEN, - PERMISSION_REVOKE_USER_ACCESS_TOKEN, - PERMISSION_CREATE_BOT, - PERMISSION_READ_BOTS, - PERMISSION_READ_OTHERS_BOTS, - PERMISSION_MANAGE_BOTS, - PERMISSION_MANAGE_OTHERS_BOTS, - PERMISSION_MANAGE_SYSTEM, - PERMISSION_VIEW_MEMBERS, + PERMISSION_USE_CHANNEL_MENTIONS, + PERMISSION_USE_GROUP_MENTIONS, + } + + DeprecatedPermissions = []*Permission{ + PERMISSION_PERMANENT_DELETE_USER, + PERMISSION_MANAGE_WEBHOOKS, + PERMISSION_MANAGE_OTHERS_WEBHOOKS, + PERMISSION_MANAGE_EMOJIS, + PERMISSION_MANAGE_OTHERS_EMOJIS, + } + + AllPermissions = []*Permission{} + AllPermissions = append(AllPermissions, SystemScopedPermissionsMinusSysconsole...) + AllPermissions = append(AllPermissions, TeamScopedPermissions...) + AllPermissions = append(AllPermissions, ChannelScopedPermissions...) + AllPermissions = append(AllPermissions, SysconsoleReadPermissions...) + AllPermissions = append(AllPermissions, SysconsoleWritePermissions...) + + ChannelModeratedPermissions = []string{ + PERMISSION_CREATE_POST.Id, + "create_reactions", + "manage_members", + PERMISSION_USE_CHANNEL_MENTIONS.Id, + } + + ChannelModeratedPermissionsMap = map[string]string{ + PERMISSION_CREATE_POST.Id: ChannelModeratedPermissions[0], + PERMISSION_ADD_REACTION.Id: ChannelModeratedPermissions[1], + PERMISSION_REMOVE_REACTION.Id: ChannelModeratedPermissions[1], + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id: ChannelModeratedPermissions[2], + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id: ChannelModeratedPermissions[2], + PERMISSION_USE_CHANNEL_MENTIONS.Id: ChannelModeratedPermissions[3], } } diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugin_event_data.go b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_event_data.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/model/plugin_event_data.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/plugin_event_data.go diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_key_value.go similarity index 90% rename from vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/plugin_key_value.go index 7e156d9..cd5406e 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_key_value.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_kvset_options.go b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_kvset_options.go new file mode 100644 index 0000000..1d374c8 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_kvset_options.go @@ -0,0 +1,47 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "net/http" +) + +// PluginKVSetOptions contains information on how to store a value in the plugin KV store. 
+type PluginKVSetOptions struct { + Atomic bool // Only store the value if the current value matches the oldValue + OldValue []byte // The value to compare with the current value. Only used when Atomic is true + ExpireInSeconds int64 // Set an expire counter +} + +// IsValid returns nil if the chosen options are valid. +func (opt *PluginKVSetOptions) IsValid() *AppError { + if !opt.Atomic && opt.OldValue != nil { + return NewAppError( + "PluginKVSetOptions.IsValid", + "model.plugin_kvset_options.is_valid.old_value.app_error", + nil, + "", + http.StatusBadRequest, + ) + } + + return nil +} + +// NewPluginKeyValueFromOptions return a PluginKeyValue given a pluginID, a KV pair and options. +func NewPluginKeyValueFromOptions(pluginId, key string, value []byte, opt PluginKVSetOptions) (*PluginKeyValue, *AppError) { + expireAt := int64(0) + if opt.ExpireInSeconds != 0 { + expireAt = GetMillis() + (opt.ExpireInSeconds * 1000) + } + + kv := &PluginKeyValue{ + PluginId: pluginId, + Key: key, + Value: value, + ExpireAt: expireAt, + } + + return kv, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugin_status.go b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_status.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/model/plugin_status.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/plugin_status.go diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/valid.go b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_valid.go similarity index 77% rename from vendor/github.com/mattermost/mattermost-server/plugin/valid.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/plugin_valid.go index 5c543e6..b614451 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/valid.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/plugin_valid.go @@ -1,7 +1,7 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. -package plugin +package model import ( "regexp" @@ -22,11 +22,11 @@ func init() { validId = regexp.MustCompile(ValidIdRegex) } -// IsValidId verifies that the plugin id has a minimum length of 3, maximum length of 190, and +// IsValidPluginId verifies that the plugin id has a minimum length of 3, maximum length of 190, and // contains only alphanumeric characters, dashes, underscores and periods. // // These constraints are necessary since the plugin id is used as part of a filesystem path. -func IsValidId(id string) bool { +func IsValidPluginId(id string) bool { if utf8.RuneCountInString(id) < MinIdLength { return false } diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go b/vendor/github.com/mattermost/mattermost-server/v5/model/plugins_response.go similarity index 91% rename from vendor/github.com/mattermost/mattermost-server/model/plugins_response.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/plugins_response.go index 177cfe6..421ee2f 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/plugins_response.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
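The hunks above add PluginKVSetOptions and NewPluginKeyValueFromOptions to the v5 model package. As a rough illustration only (not part of this patch), the sketch below shows how plugin-side code might build an expiring, atomically compared entry with the new API; the plugin ID, key, and payload values are made up for the example.

	package main

	import (
		"fmt"

		"github.com/mattermost/mattermost-server/v5/model"
	)

	func main() {
		// Hypothetical plugin ID, key, and values; only the model API comes from the diff above.
		opts := model.PluginKVSetOptions{
			Atomic:          true, // only write if the stored value still equals OldValue
			OldValue:        []byte(`"pending"`),
			ExpireInSeconds: 3600, // entry expires one hour after it is written
		}

		if appErr := opts.IsValid(); appErr != nil {
			fmt.Println("invalid options:", appErr.Error())
			return
		}

		kv, appErr := model.NewPluginKeyValueFromOptions("com.example.todo", "job-state", []byte(`"running"`), opts)
		if appErr != nil {
			fmt.Println("could not build key-value:", appErr.Error())
			return
		}

		// ExpireAt was computed by the helper as GetMillis() + ExpireInSeconds*1000.
		fmt.Printf("storing %s/%s until %d\n", kv.PluginId, kv.Key, kv.ExpireAt)
	}
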
package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/post.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post.go similarity index 67% rename from vendor/github.com/mattermost/mattermost-server/model/post.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/post.go index 7272969..851289a 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/post.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post.go @@ -1,17 +1,20 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model import ( "encoding/json" + "errors" "io" "net/http" + "regexp" "sort" "strings" + "sync" "unicode/utf8" - "github.com/mattermost/mattermost-server/utils/markdown" + "github.com/mattermost/mattermost-server/v5/utils/markdown" ) const ( @@ -21,12 +24,14 @@ const ( POST_SYSTEM_GENERIC = "system_generic" POST_JOIN_LEAVE = "system_join_leave" // Deprecated, use POST_JOIN_CHANNEL or POST_LEAVE_CHANNEL instead POST_JOIN_CHANNEL = "system_join_channel" + POST_GUEST_JOIN_CHANNEL = "system_guest_join_channel" POST_LEAVE_CHANNEL = "system_leave_channel" POST_JOIN_TEAM = "system_join_team" POST_LEAVE_TEAM = "system_leave_team" POST_AUTO_RESPONDER = "system_auto_responder" POST_ADD_REMOVE = "system_add_remove" // Deprecated, use POST_ADD_TO_CHANNEL or POST_REMOVE_FROM_CHANNEL instead POST_ADD_TO_CHANNEL = "system_add_to_channel" + POST_ADD_GUEST_TO_CHANNEL = "system_add_guest_to_chan" POST_REMOVE_FROM_CHANNEL = "system_remove_from_channel" POST_MOVE_CHANNEL = "system_move_channel" POST_ADD_TO_TEAM = "system_add_to_team" @@ -36,6 +41,7 @@ const ( POST_CONVERT_CHANNEL = "system_convert_channel" POST_PURPOSE_CHANGE = "system_purpose_change" POST_CHANNEL_DELETED = "system_channel_deleted" + POST_CHANNEL_RESTORED = "system_channel_restored" POST_EPHEMERAL = "system_ephemeral" POST_CHANGE_CHANNEL_PRIVACY = "system_change_chan_privacy" POST_ADD_BOT_TEAMS_CHANNELS = "add_bot_teams_channels" @@ -55,8 +61,14 @@ const ( POST_PROPS_DELETE_BY = "deleteBy" POST_PROPS_OVERRIDE_ICON_URL = "override_icon_url" POST_PROPS_OVERRIDE_ICON_EMOJI = "override_icon_emoji" + + POST_PROPS_MENTION_HIGHLIGHT_DISABLED = "mentionHighlightDisabled" + POST_PROPS_GROUP_HIGHLIGHT_DISABLED = "disable_group_highlight" + POST_SYSTEM_WARN_METRIC_STATUS = "warn_metric_status" ) +var AT_MENTION_PATTEN = regexp.MustCompile(`\B@`) + type Post struct { Id string `json:"id"` CreateAt int64 `json:"create_at"` @@ -71,14 +83,14 @@ type Post struct { OriginalId string `json:"original_id"` Message string `json:"message"` - // MessageSource will contain the message as submitted by the user if Message has been modified // by Mattermost for presentation (e.g if an image proxy is being used). It should be used to // populate edit boxes if present. MessageSource string `json:"message_source,omitempty" db:"-"` Type string `json:"type"` - Props StringInterface `json:"props"` + propsMu sync.RWMutex `db:"-"` // Unexported mutex used to guard Post.Props. 
+ Props StringInterface `json:"props"` // Deprecated: use GetProps() Hashtags string `json:"hashtags"` Filenames StringArray `json:"filenames,omitempty"` // Deprecated, do not use this field any more FileIds StringArray `json:"file_ids,omitempty"` @@ -86,7 +98,10 @@ type Post struct { HasReactions bool `json:"has_reactions,omitempty"` // Transient data populated before sending a post to the client - Metadata *PostMetadata `json:"metadata,omitempty" db:"-"` + ReplyCount int64 `json:"reply_count" db:"-"` + LastReplyAt int64 `json:"last_reply_at" db:"-"` + Participants []*User `json:"participants" db:"-"` + Metadata *PostMetadata `json:"metadata,omitempty" db:"-"` } type PostEphemeral struct { @@ -150,10 +165,54 @@ type PostForIndexing struct { ParentCreateAt *int64 `json:"parent_create_at"` } -// Clone shallowly copies the post. +type FileForIndexing struct { + FileInfo + ChannelId string `json:"channel_id"` + Content string `json:"content"` +} + +// ShallowCopy is an utility function to shallow copy a Post to the given +// destination without touching the internal RWMutex. +func (o *Post) ShallowCopy(dst *Post) error { + if dst == nil { + return errors.New("dst cannot be nil") + } + o.propsMu.RLock() + defer o.propsMu.RUnlock() + dst.propsMu.Lock() + defer dst.propsMu.Unlock() + dst.Id = o.Id + dst.CreateAt = o.CreateAt + dst.UpdateAt = o.UpdateAt + dst.EditAt = o.EditAt + dst.DeleteAt = o.DeleteAt + dst.IsPinned = o.IsPinned + dst.UserId = o.UserId + dst.ChannelId = o.ChannelId + dst.RootId = o.RootId + dst.ParentId = o.ParentId + dst.OriginalId = o.OriginalId + dst.Message = o.Message + dst.MessageSource = o.MessageSource + dst.Type = o.Type + dst.Props = o.Props + dst.Hashtags = o.Hashtags + dst.Filenames = o.Filenames + dst.FileIds = o.FileIds + dst.PendingPostId = o.PendingPostId + dst.HasReactions = o.HasReactions + dst.ReplyCount = o.ReplyCount + dst.Participants = o.Participants + dst.LastReplyAt = o.LastReplyAt + dst.Metadata = o.Metadata + return nil +} + +// Clone shallowly copies the post and returns the copy. 
func (o *Post) Clone() *Post { - copy := *o - return &copy + copy := &Post{} + o.ShallowCopy(copy) + return copy } func (o *Post) ToJson() string { @@ -168,6 +227,24 @@ func (o *Post) ToUnsanitizedJson() string { return string(b) } +type GetPostsSinceOptions struct { + ChannelId string + Time int64 + SkipFetchThreads bool + CollapsedThreads bool + CollapsedThreadsExtended bool +} + +type GetPostsOptions struct { + ChannelId string + PostId string + Page int + PerPage int + SkipFetchThreads bool + CollapsedThreads bool + CollapsedThreadsExtended bool +} + func PostFromJson(data io.Reader) *Post { var o *Post json.NewDecoder(data).Decode(&o) @@ -179,8 +256,7 @@ func (o *Post) Etag() string { } func (o *Post) IsValid(maxPostSize int) *AppError { - - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("Post.IsValid", "model.post.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -192,19 +268,19 @@ func (o *Post) IsValid(maxPostSize int) *AppError { return NewAppError("Post.IsValid", "model.post.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if len(o.UserId) != 26 { + if !IsValidId(o.UserId) { return NewAppError("Post.IsValid", "model.post.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } - if len(o.ChannelId) != 26 { + if !IsValidId(o.ChannelId) { return NewAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } - if !(len(o.RootId) == 26 || len(o.RootId) == 0) { + if !(IsValidId(o.RootId) || len(o.RootId) == 0) { return NewAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "", http.StatusBadRequest) } - if !(len(o.ParentId) == 26 || len(o.ParentId) == 0) { + if !(IsValidId(o.ParentId) || len(o.ParentId) == 0) { return NewAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "", http.StatusBadRequest) } @@ -227,14 +303,17 @@ func (o *Post) IsValid(maxPostSize int) *AppError { switch o.Type { case POST_DEFAULT, + POST_SYSTEM_GENERIC, POST_JOIN_LEAVE, POST_AUTO_RESPONDER, POST_ADD_REMOVE, POST_JOIN_CHANNEL, + POST_GUEST_JOIN_CHANNEL, POST_LEAVE_CHANNEL, POST_JOIN_TEAM, POST_LEAVE_TEAM, POST_ADD_TO_CHANNEL, + POST_ADD_GUEST_TO_CHANNEL, POST_REMOVE_FROM_CHANNEL, POST_MOVE_CHANNEL, POST_ADD_TO_TEAM, @@ -245,9 +324,11 @@ func (o *Post) IsValid(maxPostSize int) *AppError { POST_DISPLAYNAME_CHANGE, POST_CONVERT_CHANNEL, POST_CHANNEL_DELETED, + POST_CHANNEL_RESTORED, POST_CHANGE_CHANNEL_PRIVACY, POST_ME, - POST_ADD_BOT_TEAMS_CHANNELS: + POST_ADD_BOT_TEAMS_CHANNELS, + POST_SYSTEM_WARN_METRIC_STATUS: default: if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) { return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest) @@ -262,7 +343,7 @@ func (o *Post) IsValid(maxPostSize int) *AppError { return NewAppError("Post.IsValid", "model.post.is_valid.file_ids.app_error", nil, "id="+o.Id, http.StatusBadRequest) } - if utf8.RuneCountInString(StringInterfaceToJson(o.Props)) > POST_PROPS_MAX_RUNES { + if utf8.RuneCountInString(StringInterfaceToJson(o.GetProps())) > POST_PROPS_MAX_RUNES { return NewAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id, http.StatusBadRequest) } @@ -275,10 +356,13 @@ func (o *Post) SanitizeProps() { } for _, member := range membersToSanitize { - if _, ok := o.Props[member]; ok { - delete(o.Props, member) + if _, ok := o.GetProps()[member]; ok { + o.DelProp(member) } } + for _, p := range o.Participants { + p.Sanitize(map[string]bool{}) + } } func (o *Post) 
PreSave() { @@ -297,8 +381,8 @@ func (o *Post) PreSave() { } func (o *Post) PreCommit() { - if o.Props == nil { - o.Props = make(map[string]interface{}) + if o.GetProps() == nil { + o.SetProps(make(map[string]interface{})) } if o.Filenames == nil { @@ -316,41 +400,88 @@ func (o *Post) PreCommit() { } func (o *Post) MakeNonNil() { - if o.Props == nil { - o.Props = make(map[string]interface{}) + if o.GetProps() == nil { + o.SetProps(make(map[string]interface{})) } } +func (o *Post) DelProp(key string) { + o.propsMu.Lock() + defer o.propsMu.Unlock() + propsCopy := make(map[string]interface{}, len(o.Props)-1) + for k, v := range o.Props { + propsCopy[k] = v + } + delete(propsCopy, key) + o.Props = propsCopy +} + func (o *Post) AddProp(key string, value interface{}) { + o.propsMu.Lock() + defer o.propsMu.Unlock() + propsCopy := make(map[string]interface{}, len(o.Props)+1) + for k, v := range o.Props { + propsCopy[k] = v + } + propsCopy[key] = value + o.Props = propsCopy +} - o.MakeNonNil() +func (o *Post) GetProps() StringInterface { + o.propsMu.RLock() + defer o.propsMu.RUnlock() + return o.Props +} - o.Props[key] = value +func (o *Post) SetProps(props StringInterface) { + o.propsMu.Lock() + defer o.propsMu.Unlock() + o.Props = props +} + +func (o *Post) GetProp(key string) interface{} { + o.propsMu.RLock() + defer o.propsMu.RUnlock() + return o.Props[key] } func (o *Post) IsSystemMessage() bool { return len(o.Type) >= len(POST_SYSTEM_MESSAGE_PREFIX) && o.Type[:len(POST_SYSTEM_MESSAGE_PREFIX)] == POST_SYSTEM_MESSAGE_PREFIX } -func (p *Post) Patch(patch *PostPatch) { +func (o *Post) IsJoinLeaveMessage() bool { + return o.Type == POST_JOIN_LEAVE || + o.Type == POST_ADD_REMOVE || + o.Type == POST_JOIN_CHANNEL || + o.Type == POST_LEAVE_CHANNEL || + o.Type == POST_JOIN_TEAM || + o.Type == POST_LEAVE_TEAM || + o.Type == POST_ADD_TO_CHANNEL || + o.Type == POST_REMOVE_FROM_CHANNEL || + o.Type == POST_ADD_TO_TEAM || + o.Type == POST_REMOVE_FROM_TEAM +} + +func (o *Post) Patch(patch *PostPatch) { if patch.IsPinned != nil { - p.IsPinned = *patch.IsPinned + o.IsPinned = *patch.IsPinned } if patch.Message != nil { - p.Message = *patch.Message + o.Message = *patch.Message } if patch.Props != nil { - p.Props = *patch.Props + newProps := *patch.Props + o.SetProps(newProps) } if patch.FileIds != nil { - p.FileIds = *patch.FileIds + o.FileIds = *patch.FileIds } if patch.HasReactions != nil { - p.HasReactions = *patch.HasReactions + o.HasReactions = *patch.HasReactions } } @@ -383,31 +514,69 @@ func (o *SearchParameter) SearchParameterToJson() string { return string(b) } -func SearchParameterFromJson(data io.Reader) *SearchParameter { +func SearchParameterFromJson(data io.Reader) (*SearchParameter, error) { decoder := json.NewDecoder(data) var searchParam SearchParameter - err := decoder.Decode(&searchParam) - if err != nil { - return nil + if err := decoder.Decode(&searchParam); err != nil { + return nil, err } - return &searchParam + return &searchParam, nil } func (o *Post) ChannelMentions() []string { return ChannelMentions(o.Message) } +// DisableMentionHighlights disables a posts mention highlighting and returns the first channel mention that was present in the message. +func (o *Post) DisableMentionHighlights() string { + mention, hasMentions := findAtChannelMention(o.Message) + if hasMentions { + o.AddProp(POST_PROPS_MENTION_HIGHLIGHT_DISABLED, true) + } + return mention +} + +// DisableMentionHighlights disables mention highlighting for a post patch if required. 
+func (o *PostPatch) DisableMentionHighlights() { + if o.Message == nil { + return + } + if _, hasMentions := findAtChannelMention(*o.Message); hasMentions { + if o.Props == nil { + o.Props = &StringInterface{} + } + (*o.Props)[POST_PROPS_MENTION_HIGHLIGHT_DISABLED] = true + } +} + +func findAtChannelMention(message string) (mention string, found bool) { + re := regexp.MustCompile(`(?i)\B@(channel|all|here)\b`) + matched := re.FindStringSubmatch(message) + if found = (len(matched) > 0); found { + mention = strings.ToLower(matched[0]) + } + return +} + func (o *Post) Attachments() []*SlackAttachment { - if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok { + if attachments, ok := o.GetProp("attachments").([]*SlackAttachment); ok { return attachments } var ret []*SlackAttachment - if attachments, ok := o.Props["attachments"].([]interface{}); ok { + if attachments, ok := o.GetProp("attachments").([]interface{}); ok { for _, attachment := range attachments { if enc, err := json.Marshal(attachment); err == nil { var decoded SlackAttachment if json.Unmarshal(enc, &decoded) == nil { + i := 0 + for _, action := range decoded.Actions { + if action != nil { + decoded.Actions[i] = action + i++ + } + } + decoded.Actions = decoded.Actions[:i] ret = append(ret, &decoded) } } diff --git a/vendor/github.com/mattermost/mattermost-server/model/post_embed.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post_embed.go similarity index 85% rename from vendor/github.com/mattermost/mattermost-server/model/post_embed.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/post_embed.go index 6e8e33c..5c6efec 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/post_embed.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post_embed.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -7,6 +7,7 @@ const ( POST_EMBED_IMAGE PostEmbedType = "image" POST_EMBED_MESSAGE_ATTACHMENT PostEmbedType = "message_attachment" POST_EMBED_OPENGRAPH PostEmbedType = "opengraph" + POST_EMBED_LINK PostEmbedType = "link" ) type PostEmbedType string diff --git a/vendor/github.com/mattermost/mattermost-server/model/post_list.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post_list.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/model/post_list.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/post_list.go index dfb690a..ebf7bef 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/post_list.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post_list.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
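The post.go changes above route all access to Post.Props through mutex-guarded accessors (GetProps, SetProps, GetProp, AddProp, DelProp) and add ShallowCopy/Clone. A minimal sketch of the intended calling pattern follows; the post contents and prop values are assumptions for illustration, not part of the patch.

	package main

	import (
		"fmt"

		"github.com/mattermost/mattermost-server/v5/model"
	)

	func main() {
		post := &model.Post{
			UserId:    model.NewId(),
			ChannelId: model.NewId(),
			Message:   "build finished",
		}

		// AddProp copies the props map under the write lock, so a map previously
		// returned by GetProps is never mutated behind a reader's back.
		post.AddProp("from_webhook", "true")
		post.AddProp(model.POST_PROPS_OVERRIDE_ICON_URL, "https://example.com/icon.png")

		if v, ok := post.GetProp("from_webhook").(string); ok {
			fmt.Println("from_webhook =", v)
		}

		// Clone goes through ShallowCopy, so the duplicate gets its own mutex and
		// later prop edits on it do not touch the original.
		dup := post.Clone()
		dup.DelProp("from_webhook")

		fmt.Println(len(post.GetProps()), len(dup.GetProps())) // 2 1
	}
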
package model @@ -46,9 +46,9 @@ func (o *PostList) StripActionIntegrations() { posts := o.Posts o.Posts = make(map[string]*Post) for id, post := range posts { - pcopy := *post + pcopy := post.Clone() pcopy.StripActionIntegrations() - o.Posts[id] = &pcopy + o.Posts[id] = pcopy } } @@ -58,9 +58,8 @@ func (o *PostList) ToJson() string { b, err := json.Marshal(©) if err != nil { return "" - } else { - return string(b) } + return string(b) } func (o *PostList) MakeNonNil() { diff --git a/vendor/github.com/mattermost/mattermost-server/model/post_metadata.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post_metadata.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/model/post_metadata.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/post_metadata.go index 5fb394b..7b0687c 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/post_metadata.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post_metadata.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/post_search_results.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post_search_results.go similarity index 90% rename from vendor/github.com/mattermost/mattermost-server/model/post_search_results.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/post_search_results.go index 2317f18..fc9ba08 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/post_search_results.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post_search_results.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -28,9 +28,8 @@ func (o *PostSearchResults) ToJson() string { b, err := json.Marshal(©) if err != nil { return "" - } else { - return string(b) } + return string(b) } func PostSearchResultsFromJson(data io.Reader) *PostSearchResults { diff --git a/vendor/github.com/mattermost/mattermost-server/model/preference.go b/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/model/preference.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/preference.go index 05379b9..3cb6ec9 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/preference.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -13,19 +13,22 @@ import ( ) const ( - PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show" - PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step" - PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings" - PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post" - PREFERENCE_CATEGORY_FAVORITE_CHANNEL = "favorite_channel" - PREFERENCE_CATEGORY_SIDEBAR_SETTINGS = "sidebar_settings" - - PREFERENCE_CATEGORY_DISPLAY_SETTINGS = "display_settings" - PREFERENCE_NAME_CHANNEL_DISPLAY_MODE = "channel_display_mode" - PREFERENCE_NAME_COLLAPSE_SETTING = "collapse_previews" - PREFERENCE_NAME_MESSAGE_DISPLAY = "message_display" - PREFERENCE_NAME_NAME_FORMAT = "name_format" - PREFERENCE_NAME_USE_MILITARY_TIME = "use_military_time" + PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show" + PREFERENCE_CATEGORY_GROUP_CHANNEL_SHOW = "group_channel_show" + PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step" + PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings" + PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post" + PREFERENCE_CATEGORY_FAVORITE_CHANNEL = "favorite_channel" + PREFERENCE_CATEGORY_SIDEBAR_SETTINGS = "sidebar_settings" + PREFERENCE_CATEGORY_COLLAPSED_THREADS_SETTINGS = "collapsed_threads_settings" + + PREFERENCE_CATEGORY_DISPLAY_SETTINGS = "display_settings" + PREFERENCE_NAME_COLLAPSED_THREADS_ENABLED = "collapsed_threads_enabled" + PREFERENCE_NAME_CHANNEL_DISPLAY_MODE = "channel_display_mode" + PREFERENCE_NAME_COLLAPSE_SETTING = "collapse_previews" + PREFERENCE_NAME_MESSAGE_DISPLAY = "message_display" + PREFERENCE_NAME_NAME_FORMAT = "name_format" + PREFERENCE_NAME_USE_MILITARY_TIME = "use_military_time" PREFERENCE_CATEGORY_THEME = "theme" // the name for theme props is the team id @@ -68,7 +71,7 @@ func PreferenceFromJson(data io.Reader) *Preference { } func (o *Preference) IsValid() *AppError { - if len(o.UserId) != 26 { + if !IsValidId(o.UserId) { return NewAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/preferences.go b/vendor/github.com/mattermost/mattermost-server/v5/model/preferences.go similarity index 82% rename from vendor/github.com/mattermost/mattermost-server/model/preferences.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/preferences.go index 172e1aa..c2d2486 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/preferences.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/preferences.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -19,9 +19,8 @@ func PreferencesFromJson(data io.Reader) (Preferences, error) { decoder := json.NewDecoder(data) var o Preferences err := decoder.Decode(&o) - if err == nil { - return o, nil - } else { + if err != nil { return nil, err } + return o, nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go b/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go new file mode 100644 index 0000000..fae8ccb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go @@ -0,0 +1,214 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + + "github.com/pkg/errors" +) + +type ProductNotices []ProductNotice + +func (r *ProductNotices) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +func UnmarshalProductNotices(data []byte) (ProductNotices, error) { + var r ProductNotices + err := json.Unmarshal(data, &r) + return r, err +} + +// List of product notices. Order is important and is used to resolve priorities. +// Each notice will only be show if conditions are met. +type ProductNotice struct { + Conditions Conditions `json:"conditions"` + ID string `json:"id"` // Unique identifier for this notice. Can be a running number. Used for storing 'viewed'; state on the server. + LocalizedMessages map[string]NoticeMessageInternal `json:"localizedMessages"` // Notice message data, organized by locale.; Example:; "localizedMessages": {; "en": { "title": "English", description: "English description"},; "frFR": { "title": "Frances", description: "French description"}; } + Repeatable *bool `json:"repeatable,omitempty"` // Configurable flag if the notice should reappear after it’s seen and dismissed +} + +func (n *ProductNotice) SysAdminOnly() bool { + return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudience_Sysadmin +} + +func (n *ProductNotice) TeamAdminOnly() bool { + return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudience_TeamAdmin +} + +type Conditions struct { + Audience *NoticeAudience `json:"audience,omitempty"` + ClientType *NoticeClientType `json:"clientType,omitempty"` // Only show the notice on specific clients. Defaults to 'all' + DesktopVersion []string `json:"desktopVersion,omitempty"` // What desktop client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["= 2020-03-01T00:00:00Z" - show after specified date; "< 2020-03-01T00:00:00Z" - show before the specified date; "> 2020-03-01T00:00:00Z <= 2020-04-01T00:00:00Z" - show only between the specified dates + InstanceType *NoticeInstanceType `json:"instanceType,omitempty"` + MobileVersion []string `json:"mobileVersion,omitempty"` // What mobile client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["=1.2.3 < ~2.4.x"]; Example: [" -1 { + pn.Platform = deviceId[:index] + pn.DeviceId = deviceId[index+1:] + } +} + +func PushNotificationFromJson(data io.Reader) (*PushNotification, error) { + if data == nil { + return nil, errors.New("push notification data can't be nil") + } + var pn *PushNotification + if err := json.NewDecoder(data).Decode(&pn); err != nil { + return nil, err + } + return pn, nil +} + +func PushNotificationAckFromJson(data io.Reader) (*PushNotificationAck, error) { + if data == nil { + return nil, errors.New("push notification data can't be nil") + } + var ack *PushNotificationAck + if err := json.NewDecoder(data).Decode(&ack); err != nil { + return nil, err + } + return ack, nil +} + +func (ack *PushNotificationAck) ToJson() string { + b, _ := json.Marshal(ack) + return string(b) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/push_response.go b/vendor/github.com/mattermost/mattermost-server/v5/model/push_response.go similarity index 87% rename from vendor/github.com/mattermost/mattermost-server/model/push_response.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/push_response.go index 1434a2b..227a089 100644 --- 
a/vendor/github.com/mattermost/mattermost-server/model/push_response.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/push_response.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -37,8 +37,8 @@ func NewErrorPushResponse(message string) PushResponse { return m } -func (me *PushResponse) ToJson() string { - b, _ := json.Marshal(me) +func (pr *PushResponse) ToJson() string { + b, _ := json.Marshal(pr) return string(b) } @@ -48,7 +48,6 @@ func PushResponseFromJson(data io.Reader) PushResponse { var objmap PushResponse if err := decoder.Decode(&objmap); err != nil { return make(map[string]string) - } else { - return objmap } + return objmap } diff --git a/vendor/github.com/mattermost/mattermost-server/model/reaction.go b/vendor/github.com/mattermost/mattermost-server/v5/model/reaction.go similarity index 89% rename from vendor/github.com/mattermost/mattermost-server/model/reaction.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/reaction.go index 550918a..6f950d7 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/reaction.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/reaction.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -27,9 +27,8 @@ func ReactionFromJson(data io.Reader) *Reaction { if err := json.NewDecoder(data).Decode(&o); err != nil { return nil - } else { - return &o } + return &o } func ReactionsToJson(o []*Reaction) string { @@ -48,9 +47,8 @@ func MapPostIdToReactionsFromJson(data io.Reader) map[string][]*Reaction { var objmap map[string][]*Reaction if err := decoder.Decode(&objmap); err != nil { return make(map[string][]*Reaction) - } else { - return objmap } + return objmap } func ReactionsFromJson(data io.Reader) []*Reaction { @@ -58,17 +56,16 @@ func ReactionsFromJson(data io.Reader) []*Reaction { if err := json.NewDecoder(data).Decode(&o); err != nil { return nil - } else { - return o } + return o } func (o *Reaction) IsValid() *AppError { - if len(o.UserId) != 26 { + if !IsValidId(o.UserId) { return NewAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) } - if len(o.PostId) != 26 { + if !IsValidId(o.PostId) { return NewAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId, http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/role.go b/vendor/github.com/mattermost/mattermost-server/v5/model/role.go new file mode 100644 index 0000000..b8bf894 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/role.go @@ -0,0 +1,784 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "strings" +) + +// SysconsoleAncillaryPermissions maps the non-sysconsole permissions required by each sysconsole view. 
+var SysconsoleAncillaryPermissions map[string][]*Permission +var SystemManagerDefaultPermissions []string +var SystemUserManagerDefaultPermissions []string +var SystemReadOnlyAdminDefaultPermissions []string + +var BuiltInSchemeManagedRoleIDs []string + +var NewSystemRoleIDs []string + +func init() { + NewSystemRoleIDs = []string{ + SYSTEM_USER_MANAGER_ROLE_ID, + SYSTEM_READ_ONLY_ADMIN_ROLE_ID, + SYSTEM_MANAGER_ROLE_ID, + } + + BuiltInSchemeManagedRoleIDs = append([]string{ + SYSTEM_GUEST_ROLE_ID, + SYSTEM_USER_ROLE_ID, + SYSTEM_ADMIN_ROLE_ID, + SYSTEM_POST_ALL_ROLE_ID, + SYSTEM_POST_ALL_PUBLIC_ROLE_ID, + SYSTEM_USER_ACCESS_TOKEN_ROLE_ID, + + TEAM_GUEST_ROLE_ID, + TEAM_USER_ROLE_ID, + TEAM_ADMIN_ROLE_ID, + TEAM_POST_ALL_ROLE_ID, + TEAM_POST_ALL_PUBLIC_ROLE_ID, + + CHANNEL_GUEST_ROLE_ID, + CHANNEL_USER_ROLE_ID, + CHANNEL_ADMIN_ROLE_ID, + }, NewSystemRoleIDs...) + + // When updating the values here, the values in mattermost-redux must also be updated. + SysconsoleAncillaryPermissions = map[string][]*Permission{ + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id: { + PERMISSION_READ_PUBLIC_CHANNEL, + PERMISSION_READ_CHANNEL, + PERMISSION_READ_PUBLIC_CHANNEL_GROUPS, + PERMISSION_READ_PRIVATE_CHANNEL_GROUPS, + }, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS.Id: { + PERMISSION_READ_OTHER_USERS_TEAMS, + }, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id: { + PERMISSION_LIST_PRIVATE_TEAMS, + PERMISSION_LIST_PUBLIC_TEAMS, + PERMISSION_VIEW_TEAM, + }, + PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE.Id: { + PERMISSION_MANAGE_JOBS, + }, + PERMISSION_SYSCONSOLE_READ_COMPLIANCE.Id: { + PERMISSION_READ_JOBS, + PERMISSION_DOWNLOAD_COMPLIANCE_EXPORT_RESULT, + }, + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT.Id: { + PERMISSION_READ_JOBS, + }, + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id: { + PERMISSION_READ_JOBS, + }, + PERMISSION_SYSCONSOLE_READ_REPORTING.Id: { + PERMISSION_VIEW_TEAM, + }, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS.Id: { + PERMISSION_EDIT_OTHER_USERS, + PERMISSION_DEMOTE_TO_GUEST, + PERMISSION_PROMOTE_GUEST, + }, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS.Id: { + PERMISSION_MANAGE_TEAM, + PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES, + PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES, + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, + PERMISSION_DELETE_PRIVATE_CHANNEL, + PERMISSION_DELETE_PUBLIC_CHANNEL, + PERMISSION_MANAGE_CHANNEL_ROLES, + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE, + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC, + }, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS.Id: { + PERMISSION_MANAGE_TEAM, + PERMISSION_MANAGE_TEAM_ROLES, + PERMISSION_REMOVE_USER_FROM_TEAM, + PERMISSION_JOIN_PRIVATE_TEAMS, + PERMISSION_JOIN_PUBLIC_TEAMS, + PERMISSION_ADD_USER_TO_TEAM, + }, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS.Id: { + PERMISSION_MANAGE_TEAM, + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS, + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE, + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC, + }, + PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT.Id: { + PERMISSION_MANAGE_JOBS, + }, + PERMISSION_SYSCONSOLE_WRITE_SITE.Id: { + PERMISSION_EDIT_BRAND, + }, + } + + SystemUserManagerDefaultPermissions = []string{ + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS.Id, + 
PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS.Id, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS.Id, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS.Id, + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id, + } + + SystemReadOnlyAdminDefaultPermissions = []string{ + PERMISSION_SYSCONSOLE_READ_ABOUT.Id, + PERMISSION_SYSCONSOLE_READ_REPORTING.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS.Id, + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT.Id, + PERMISSION_SYSCONSOLE_READ_SITE.Id, + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id, + PERMISSION_SYSCONSOLE_READ_PLUGINS.Id, + PERMISSION_SYSCONSOLE_READ_COMPLIANCE.Id, + PERMISSION_SYSCONSOLE_READ_INTEGRATIONS.Id, + PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL.Id, + } + + SystemManagerDefaultPermissions = []string{ + PERMISSION_SYSCONSOLE_READ_ABOUT.Id, + PERMISSION_SYSCONSOLE_READ_REPORTING.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id, + PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS.Id, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS.Id, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS.Id, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS.Id, + PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS.Id, + PERMISSION_SYSCONSOLE_READ_ENVIRONMENT.Id, + PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT.Id, + PERMISSION_SYSCONSOLE_READ_SITE.Id, + PERMISSION_SYSCONSOLE_WRITE_SITE.Id, + PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id, + PERMISSION_SYSCONSOLE_READ_PLUGINS.Id, + PERMISSION_SYSCONSOLE_READ_INTEGRATIONS.Id, + PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS.Id, + } + + // Add the ancillary permissions to each system role + SystemUserManagerDefaultPermissions = addAncillaryPermissions(SystemUserManagerDefaultPermissions) + SystemReadOnlyAdminDefaultPermissions = addAncillaryPermissions(SystemReadOnlyAdminDefaultPermissions) + SystemManagerDefaultPermissions = addAncillaryPermissions(SystemManagerDefaultPermissions) +} + +type RoleType string +type RoleScope string + +const ( + SYSTEM_GUEST_ROLE_ID = "system_guest" + SYSTEM_USER_ROLE_ID = "system_user" + SYSTEM_ADMIN_ROLE_ID = "system_admin" + SYSTEM_POST_ALL_ROLE_ID = "system_post_all" + SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public" + SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token" + SYSTEM_USER_MANAGER_ROLE_ID = "system_user_manager" + SYSTEM_READ_ONLY_ADMIN_ROLE_ID = "system_read_only_admin" + SYSTEM_MANAGER_ROLE_ID = "system_manager" + + TEAM_GUEST_ROLE_ID = "team_guest" + TEAM_USER_ROLE_ID = "team_user" + TEAM_ADMIN_ROLE_ID = "team_admin" + TEAM_POST_ALL_ROLE_ID = "team_post_all" + TEAM_POST_ALL_PUBLIC_ROLE_ID = "team_post_all_public" + + CHANNEL_GUEST_ROLE_ID = "channel_guest" + CHANNEL_USER_ROLE_ID = "channel_user" + CHANNEL_ADMIN_ROLE_ID = "channel_admin" + + ROLE_NAME_MAX_LENGTH = 64 + ROLE_DISPLAY_NAME_MAX_LENGTH = 128 + ROLE_DESCRIPTION_MAX_LENGTH = 1024 + + RoleScopeSystem RoleScope = "System" + RoleScopeTeam RoleScope = "Team" + RoleScopeChannel RoleScope = "Channel" + + RoleTypeGuest RoleType = "Guest" + RoleTypeUser RoleType = "User" + RoleTypeAdmin RoleType = "Admin" +) + +type Role struct { + Id string `json:"id"` + Name string `json:"name"` + DisplayName string `json:"display_name"` + Description string 
`json:"description"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + Permissions []string `json:"permissions"` + SchemeManaged bool `json:"scheme_managed"` + BuiltIn bool `json:"built_in"` +} + +type RolePatch struct { + Permissions *[]string `json:"permissions"` +} + +type RolePermissions struct { + RoleID string + Permissions []string +} + +func (r *Role) ToJson() string { + b, _ := json.Marshal(r) + return string(b) +} + +func RoleFromJson(data io.Reader) *Role { + var r *Role + json.NewDecoder(data).Decode(&r) + return r +} + +func RoleListToJson(r []*Role) string { + b, _ := json.Marshal(r) + return string(b) +} + +func RoleListFromJson(data io.Reader) []*Role { + var roles []*Role + json.NewDecoder(data).Decode(&roles) + return roles +} + +func (r *RolePatch) ToJson() string { + b, _ := json.Marshal(r) + return string(b) +} + +func RolePatchFromJson(data io.Reader) *RolePatch { + var rolePatch *RolePatch + json.NewDecoder(data).Decode(&rolePatch) + return rolePatch +} + +func (r *Role) Patch(patch *RolePatch) { + if patch.Permissions != nil { + r.Permissions = *patch.Permissions + } +} + +// MergeChannelHigherScopedPermissions is meant to be invoked on a channel scheme's role and merges the higher-scoped +// channel role's permissions. +func (r *Role) MergeChannelHigherScopedPermissions(higherScopedPermissions *RolePermissions) { + mergedPermissions := []string{} + + higherScopedPermissionsMap := AsStringBoolMap(higherScopedPermissions.Permissions) + rolePermissionsMap := AsStringBoolMap(r.Permissions) + + for _, cp := range AllPermissions { + if cp.Scope != PermissionScopeChannel { + continue + } + + _, presentOnHigherScope := higherScopedPermissionsMap[cp.Id] + + // For the channel admin role always look to the higher scope to determine if the role has their permission. + // The channel admin is a special case because they're not part of the UI to be "channel moderated", only + // channel members and channel guests are. + if higherScopedPermissions.RoleID == CHANNEL_ADMIN_ROLE_ID && presentOnHigherScope { + mergedPermissions = append(mergedPermissions, cp.Id) + continue + } + + _, permissionIsModerated := ChannelModeratedPermissionsMap[cp.Id] + if permissionIsModerated { + _, presentOnRole := rolePermissionsMap[cp.Id] + if presentOnRole && presentOnHigherScope { + mergedPermissions = append(mergedPermissions, cp.Id) + } + } else { + if presentOnHigherScope { + mergedPermissions = append(mergedPermissions, cp.Id) + } + } + } + + r.Permissions = mergedPermissions +} + +// Returns an array of permissions that are in either role.Permissions +// or patch.Permissions, but not both. 
+func PermissionsChangedByPatch(role *Role, patch *RolePatch) []string { + var result []string + + if patch.Permissions == nil { + return result + } + + roleMap := make(map[string]bool) + patchMap := make(map[string]bool) + + for _, permission := range role.Permissions { + roleMap[permission] = true + } + + for _, permission := range *patch.Permissions { + patchMap[permission] = true + } + + for _, permission := range role.Permissions { + if !patchMap[permission] { + result = append(result, permission) + } + } + + for _, permission := range *patch.Permissions { + if !roleMap[permission] { + result = append(result, permission) + } + } + + return result +} + +func ChannelModeratedPermissionsChangedByPatch(role *Role, patch *RolePatch) []string { + var result []string + + if role == nil { + return result + } + + if patch.Permissions == nil { + return result + } + + roleMap := make(map[string]bool) + patchMap := make(map[string]bool) + + for _, permission := range role.Permissions { + if channelModeratedPermissionName, found := ChannelModeratedPermissionsMap[permission]; found { + roleMap[channelModeratedPermissionName] = true + } + } + + for _, permission := range *patch.Permissions { + if channelModeratedPermissionName, found := ChannelModeratedPermissionsMap[permission]; found { + patchMap[channelModeratedPermissionName] = true + } + } + + for permissionKey := range roleMap { + if !patchMap[permissionKey] { + result = append(result, permissionKey) + } + } + + for permissionKey := range patchMap { + if !roleMap[permissionKey] { + result = append(result, permissionKey) + } + } + + return result +} + +// GetChannelModeratedPermissions returns a map of channel moderated permissions that the role has access to +func (r *Role) GetChannelModeratedPermissions(channelType string) map[string]bool { + moderatedPermissions := make(map[string]bool) + for _, permission := range r.Permissions { + if _, found := ChannelModeratedPermissionsMap[permission]; !found { + continue + } + + for moderated, moderatedPermissionValue := range ChannelModeratedPermissionsMap { + // the moderated permission has already been found to be true so skip this iteration + if moderatedPermissions[moderatedPermissionValue] { + continue + } + + if moderated == permission { + // Special case where the channel moderated permission for `manage_members` is different depending on whether the channel is private or public + if moderated == PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id || moderated == PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id { + canManagePublic := channelType == CHANNEL_OPEN && moderated == PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id + canManagePrivate := channelType == CHANNEL_PRIVATE && moderated == PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id + moderatedPermissions[moderatedPermissionValue] = canManagePublic || canManagePrivate + } else { + moderatedPermissions[moderatedPermissionValue] = true + } + } + } + } + + return moderatedPermissions +} + +// RolePatchFromChannelModerationsPatch Creates and returns a RolePatch based on a slice of ChannelModerationPatchs, roleName is expected to be either "members" or "guests". +func (r *Role) RolePatchFromChannelModerationsPatch(channelModerationsPatch []*ChannelModerationPatch, roleName string) *RolePatch { + permissionsToAddToPatch := make(map[string]bool) + + // Iterate through the list of existing permissions on the role and append permissions that we want to keep. 
+ for _, permission := range r.Permissions { + // Permission is not moderated so dont add it to the patch and skip the channelModerationsPatch + if _, isModerated := ChannelModeratedPermissionsMap[permission]; !isModerated { + continue + } + + permissionEnabled := true + // Check if permission has a matching moderated permission name inside the channel moderation patch + for _, channelModerationPatch := range channelModerationsPatch { + if *channelModerationPatch.Name == ChannelModeratedPermissionsMap[permission] { + // Permission key exists in patch with a value of false so skip over it + if roleName == "members" { + if channelModerationPatch.Roles.Members != nil && !*channelModerationPatch.Roles.Members { + permissionEnabled = false + } + } else if roleName == "guests" { + if channelModerationPatch.Roles.Guests != nil && !*channelModerationPatch.Roles.Guests { + permissionEnabled = false + } + } + } + } + + if permissionEnabled { + permissionsToAddToPatch[permission] = true + } + } + + // Iterate through the patch and add any permissions that dont already exist on the role + for _, channelModerationPatch := range channelModerationsPatch { + for permission, moderatedPermissionName := range ChannelModeratedPermissionsMap { + if roleName == "members" && channelModerationPatch.Roles.Members != nil && *channelModerationPatch.Roles.Members && *channelModerationPatch.Name == moderatedPermissionName { + permissionsToAddToPatch[permission] = true + } + + if roleName == "guests" && channelModerationPatch.Roles.Guests != nil && *channelModerationPatch.Roles.Guests && *channelModerationPatch.Name == moderatedPermissionName { + permissionsToAddToPatch[permission] = true + } + } + } + + patchPermissions := make([]string, 0, len(permissionsToAddToPatch)) + for permission := range permissionsToAddToPatch { + patchPermissions = append(patchPermissions, permission) + } + + return &RolePatch{Permissions: &patchPermissions} +} + +func (r *Role) IsValid() bool { + if !IsValidId(r.Id) { + return false + } + + return r.IsValidWithoutId() +} + +func (r *Role) IsValidWithoutId() bool { + if !IsValidRoleName(r.Name) { + return false + } + + if len(r.DisplayName) == 0 || len(r.DisplayName) > ROLE_DISPLAY_NAME_MAX_LENGTH { + return false + } + + if len(r.Description) > ROLE_DESCRIPTION_MAX_LENGTH { + return false + } + + check := func(perms []*Permission, permission string) bool { + for _, p := range perms { + if permission == p.Id { + return true + } + } + return false + } + for _, permission := range r.Permissions { + permissionValidated := check(AllPermissions, permission) || check(DeprecatedPermissions, permission) + if !permissionValidated { + return false + } + } + + return true +} + +func CleanRoleNames(roleNames []string) ([]string, bool) { + var cleanedRoleNames []string + for _, roleName := range roleNames { + if strings.TrimSpace(roleName) == "" { + continue + } + + if !IsValidRoleName(roleName) { + return roleNames, false + } + + cleanedRoleNames = append(cleanedRoleNames, roleName) + } + + return cleanedRoleNames, true +} + +func IsValidRoleName(roleName string) bool { + if len(roleName) <= 0 || len(roleName) > ROLE_NAME_MAX_LENGTH { + return false + } + + if strings.TrimLeft(roleName, "abcdefghijklmnopqrstuvwxyz0123456789_") != "" { + return false + } + + return true +} + +func MakeDefaultRoles() map[string]*Role { + roles := make(map[string]*Role) + + roles[CHANNEL_GUEST_ROLE_ID] = &Role{ + Name: "channel_guest", + DisplayName: "authentication.roles.channel_guest.name", + Description: 
"authentication.roles.channel_guest.description", + Permissions: []string{ + PERMISSION_READ_CHANNEL.Id, + PERMISSION_ADD_REACTION.Id, + PERMISSION_REMOVE_REACTION.Id, + PERMISSION_UPLOAD_FILE.Id, + PERMISSION_EDIT_POST.Id, + PERMISSION_CREATE_POST.Id, + PERMISSION_USE_CHANNEL_MENTIONS.Id, + PERMISSION_USE_SLASH_COMMANDS.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[CHANNEL_USER_ROLE_ID] = &Role{ + Name: "channel_user", + DisplayName: "authentication.roles.channel_user.name", + Description: "authentication.roles.channel_user.description", + Permissions: []string{ + PERMISSION_READ_CHANNEL.Id, + PERMISSION_ADD_REACTION.Id, + PERMISSION_REMOVE_REACTION.Id, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, + PERMISSION_UPLOAD_FILE.Id, + PERMISSION_GET_PUBLIC_LINK.Id, + PERMISSION_CREATE_POST.Id, + PERMISSION_USE_CHANNEL_MENTIONS.Id, + PERMISSION_USE_SLASH_COMMANDS.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[CHANNEL_ADMIN_ROLE_ID] = &Role{ + Name: "channel_admin", + DisplayName: "authentication.roles.channel_admin.name", + Description: "authentication.roles.channel_admin.description", + Permissions: []string{ + PERMISSION_MANAGE_CHANNEL_ROLES.Id, + PERMISSION_USE_GROUP_MENTIONS.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[TEAM_GUEST_ROLE_ID] = &Role{ + Name: "team_guest", + DisplayName: "authentication.roles.team_guest.name", + Description: "authentication.roles.team_guest.description", + Permissions: []string{ + PERMISSION_VIEW_TEAM.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[TEAM_USER_ROLE_ID] = &Role{ + Name: "team_user", + DisplayName: "authentication.roles.team_user.name", + Description: "authentication.roles.team_user.description", + Permissions: []string{ + PERMISSION_LIST_TEAM_CHANNELS.Id, + PERMISSION_JOIN_PUBLIC_CHANNELS.Id, + PERMISSION_READ_PUBLIC_CHANNEL.Id, + PERMISSION_VIEW_TEAM.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[TEAM_POST_ALL_ROLE_ID] = &Role{ + Name: "team_post_all", + DisplayName: "authentication.roles.team_post_all.name", + Description: "authentication.roles.team_post_all.description", + Permissions: []string{ + PERMISSION_CREATE_POST.Id, + PERMISSION_USE_CHANNEL_MENTIONS.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[TEAM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ + Name: "team_post_all_public", + DisplayName: "authentication.roles.team_post_all_public.name", + Description: "authentication.roles.team_post_all_public.description", + Permissions: []string{ + PERMISSION_CREATE_POST_PUBLIC.Id, + PERMISSION_USE_CHANNEL_MENTIONS.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[TEAM_ADMIN_ROLE_ID] = &Role{ + Name: "team_admin", + DisplayName: "authentication.roles.team_admin.name", + Description: "authentication.roles.team_admin.description", + Permissions: []string{ + PERMISSION_REMOVE_USER_FROM_TEAM.Id, + PERMISSION_MANAGE_TEAM.Id, + PERMISSION_IMPORT_TEAM.Id, + PERMISSION_MANAGE_TEAM_ROLES.Id, + PERMISSION_MANAGE_CHANNEL_ROLES.Id, + PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id, + PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id, + PERMISSION_MANAGE_SLASH_COMMANDS.Id, + PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id, + PERMISSION_MANAGE_INCOMING_WEBHOOKS.Id, + PERMISSION_MANAGE_OUTGOING_WEBHOOKS.Id, + PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE.Id, + PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[SYSTEM_GUEST_ROLE_ID] = &Role{ + Name: "system_guest", + DisplayName: 
"authentication.roles.global_guest.name", + Description: "authentication.roles.global_guest.description", + Permissions: []string{ + PERMISSION_CREATE_DIRECT_CHANNEL.Id, + PERMISSION_CREATE_GROUP_CHANNEL.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[SYSTEM_USER_ROLE_ID] = &Role{ + Name: "system_user", + DisplayName: "authentication.roles.global_user.name", + Description: "authentication.roles.global_user.description", + Permissions: []string{ + PERMISSION_LIST_PUBLIC_TEAMS.Id, + PERMISSION_JOIN_PUBLIC_TEAMS.Id, + PERMISSION_CREATE_DIRECT_CHANNEL.Id, + PERMISSION_CREATE_GROUP_CHANNEL.Id, + PERMISSION_VIEW_MEMBERS.Id, + }, + SchemeManaged: true, + BuiltIn: true, + } + + roles[SYSTEM_POST_ALL_ROLE_ID] = &Role{ + Name: "system_post_all", + DisplayName: "authentication.roles.system_post_all.name", + Description: "authentication.roles.system_post_all.description", + Permissions: []string{ + PERMISSION_CREATE_POST.Id, + PERMISSION_USE_CHANNEL_MENTIONS.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ + Name: "system_post_all_public", + DisplayName: "authentication.roles.system_post_all_public.name", + Description: "authentication.roles.system_post_all_public.description", + Permissions: []string{ + PERMISSION_CREATE_POST_PUBLIC.Id, + PERMISSION_USE_CHANNEL_MENTIONS.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_USER_ACCESS_TOKEN_ROLE_ID] = &Role{ + Name: "system_user_access_token", + DisplayName: "authentication.roles.system_user_access_token.name", + Description: "authentication.roles.system_user_access_token.description", + Permissions: []string{ + PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, + PERMISSION_READ_USER_ACCESS_TOKEN.Id, + PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, + }, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_USER_MANAGER_ROLE_ID] = &Role{ + Name: "system_user_manager", + DisplayName: "authentication.roles.system_user_manager.name", + Description: "authentication.roles.system_user_manager.description", + Permissions: SystemUserManagerDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_READ_ONLY_ADMIN_ROLE_ID] = &Role{ + Name: "system_read_only_admin", + DisplayName: "authentication.roles.system_read_only_admin.name", + Description: "authentication.roles.system_read_only_admin.description", + Permissions: SystemReadOnlyAdminDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + roles[SYSTEM_MANAGER_ROLE_ID] = &Role{ + Name: "system_manager", + DisplayName: "authentication.roles.system_manager.name", + Description: "authentication.roles.system_manager.description", + Permissions: SystemManagerDefaultPermissions, + SchemeManaged: false, + BuiltIn: true, + } + + allPermissionIDs := []string{} + for _, permission := range AllPermissions { + allPermissionIDs = append(allPermissionIDs, permission.Id) + } + + roles[SYSTEM_ADMIN_ROLE_ID] = &Role{ + Name: "system_admin", + DisplayName: "authentication.roles.global_admin.name", + Description: "authentication.roles.global_admin.description", + // System admins can do anything channel and team admins can do + // plus everything members of teams and channels can do to all teams + // and channels on the system + Permissions: allPermissionIDs, + SchemeManaged: true, + BuiltIn: true, + } + + return roles +} + +func addAncillaryPermissions(permissions []string) []string { + for _, permission := range permissions { + if ancillaryPermissions, ok := SysconsoleAncillaryPermissions[permission]; ok { + for 
_, ancillaryPermission := range ancillaryPermissions { + permissions = append(permissions, ancillaryPermission.Id) + } + } + } + return permissions +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go b/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go new file mode 100644 index 0000000..feaf325 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go @@ -0,0 +1,200 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "encoding/xml" + "io" + "time" +) + +const ( + USER_AUTH_SERVICE_SAML = "saml" + USER_AUTH_SERVICE_SAML_TEXT = "SAML" + USER_AUTH_SERVICE_IS_SAML = "isSaml" + USER_AUTH_SERVICE_IS_MOBILE = "isMobile" + USER_AUTH_SERVICE_IS_OAUTH = "isOAuthUser" +) + +type SamlAuthRequest struct { + Base64AuthRequest string + URL string + RelayState string +} + +type SamlCertificateStatus struct { + IdpCertificateFile bool `json:"idp_certificate_file"` + PrivateKeyFile bool `json:"private_key_file"` + PublicCertificateFile bool `json:"public_certificate_file"` +} + +type SamlMetadataResponse struct { + IdpDescriptorUrl string `json:"idp_descriptor_url"` + IdpUrl string `json:"idp_url"` + IdpPublicCertificate string `json:"idp_public_certificate"` +} + +type NameIDFormat struct { + XMLName xml.Name + Format string `xml:",attr,omitempty"` + Value string `xml:",innerxml"` +} + +type NameID struct { + NameQualifier string `xml:",attr"` + SPNameQualifier string `xml:",attr"` + Format string `xml:",attr,omitempty"` + SPProvidedID string `xml:",attr"` + Value string `xml:",chardata"` +} + +type AttributeValue struct { + Type string `xml:"http://www.w3.org/2001/XMLSchema-instance type,attr"` + Value string `xml:",chardata"` + NameID *NameID +} + +type Attribute struct { + XMLName xml.Name + FriendlyName string `xml:",attr"` + Name string `xml:",attr"` + NameFormat string `xml:",attr"` + Values []AttributeValue `xml:"AttributeValue"` +} + +type Endpoint struct { + XMLName xml.Name + Binding string `xml:"Binding,attr"` + Location string `xml:"Location,attr"` + ResponseLocation string `xml:"ResponseLocation,attr,omitempty"` +} + +type IndexedEndpoint struct { + XMLName xml.Name + Binding string `xml:"Binding,attr"` + Location string `xml:"Location,attr"` + ResponseLocation *string `xml:"ResponseLocation,attr,omitempty"` + Index int `xml:"index,attr"` + IsDefault *bool `xml:"isDefault,attr"` +} + +type IDPSSODescriptor struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:metadata IDPSSODescriptor"` + SSODescriptor + WantAuthnRequestsSigned *bool `xml:",attr"` + + SingleSignOnServices []Endpoint `xml:"SingleSignOnService"` + NameIDMappingServices []Endpoint `xml:"NameIDMappingService"` + AssertionIDRequestServices []Endpoint `xml:"AssertionIDRequestService"` + AttributeProfiles []string `xml:"AttributeProfile"` + Attributes []Attribute `xml:"Attribute"` +} + +type SSODescriptor struct { + XMLName xml.Name + RoleDescriptor + ArtifactResolutionServices []IndexedEndpoint `xml:"ArtifactResolutionService"` + SingleLogoutServices []Endpoint `xml:"SingleLogoutService"` + ManageNameIDServices []Endpoint `xml:"ManageNameIDService"` + NameIDFormats []NameIDFormat `xml:"NameIDFormat"` +} + +type X509Certificate struct { + XMLName xml.Name + Cert string `xml:",innerxml"` +} + +type X509Data struct { + XMLName xml.Name + X509Certificate X509Certificate `xml:"X509Certificate"` +} + +type KeyInfo struct { + XMLName xml.Name + DS 
string `xml:"xmlns:ds,attr"` + X509Data X509Data `xml:"X509Data"` +} +type EncryptionMethod struct { + Algorithm string `xml:"Algorithm,attr"` +} + +type KeyDescriptor struct { + XMLName xml.Name + Use string `xml:"use,attr,omitempty"` + KeyInfo KeyInfo `xml:"http://www.w3.org/2000/09/xmldsig# KeyInfo,omitempty"` +} + +type RoleDescriptor struct { + XMLName xml.Name + ID string `xml:",attr,omitempty"` + ValidUntil time.Time `xml:"validUntil,attr,omitempty"` + CacheDuration time.Duration `xml:"cacheDuration,attr,omitempty"` + ProtocolSupportEnumeration string `xml:"protocolSupportEnumeration,attr"` + ErrorURL string `xml:"errorURL,attr,omitempty"` + KeyDescriptors []KeyDescriptor `xml:"KeyDescriptor,omitempty"` + Organization *Organization `xml:"Organization,omitempty"` + ContactPersons []ContactPerson `xml:"ContactPerson,omitempty"` +} + +type ContactPerson struct { + XMLName xml.Name + ContactType string `xml:"contactType,attr"` + Company string + GivenName string + SurName string + EmailAddresses []string `xml:"EmailAddress"` + TelephoneNumbers []string `xml:"TelephoneNumber"` +} + +type LocalizedName struct { + Lang string `xml:"xml lang,attr"` + Value string `xml:",chardata"` +} + +type LocalizedURI struct { + Lang string `xml:"xml lang,attr"` + Value string `xml:",chardata"` +} + +type Organization struct { + XMLName xml.Name + OrganizationNames []LocalizedName `xml:"OrganizationName"` + OrganizationDisplayNames []LocalizedName `xml:"OrganizationDisplayName"` + OrganizationURLs []LocalizedURI `xml:"OrganizationURL"` +} + +type EntityDescriptor struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:metadata EntityDescriptor"` + EntityID string `xml:"entityID,attr"` + ID string `xml:",attr,omitempty"` + ValidUntil time.Time `xml:"validUntil,attr,omitempty"` + CacheDuration time.Duration `xml:"cacheDuration,attr,omitempty"` + RoleDescriptors []RoleDescriptor `xml:"RoleDescriptor"` + IDPSSODescriptors []IDPSSODescriptor `xml:"IDPSSODescriptor"` + Organization Organization `xml:"Organization"` + ContactPerson ContactPerson `xml:"ContactPerson"` +} + +func (s *SamlCertificateStatus) ToJson() string { + b, _ := json.Marshal(s) + return string(b) +} + +func SamlCertificateStatusFromJson(data io.Reader) *SamlCertificateStatus { + var status *SamlCertificateStatus + json.NewDecoder(data).Decode(&status) + return status +} + +func (s *SamlMetadataResponse) ToJson() string { + b, _ := json.Marshal(s) + return string(b) +} + +func SamlMetadataResponseFromJson(data io.Reader) *SamlMetadataResponse { + var status *SamlMetadataResponse + json.NewDecoder(data).Decode(&status) + return status +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go b/vendor/github.com/mattermost/mattermost-server/v5/model/scheduled_task.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/scheduled_task.go index f3529de..657cc74 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/scheduled_task.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/scheme.go b/vendor/github.com/mattermost/mattermost-server/v5/model/scheme.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/model/scheme.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/scheme.go index f40a959..a510418 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/scheme.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/scheme.go @@ -1,5 +1,5 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -29,9 +29,9 @@ type Scheme struct { Scope string `json:"scope"` DefaultTeamAdminRole string `json:"default_team_admin_role"` DefaultTeamUserRole string `json:"default_team_user_role"` - DefaultTeamGuestRole string `json:"default_team_guest_role"` DefaultChannelAdminRole string `json:"default_channel_admin_role"` DefaultChannelUserRole string `json:"default_channel_user_role"` + DefaultTeamGuestRole string `json:"default_team_guest_role"` DefaultChannelGuestRole string `json:"default_channel_guest_role"` } @@ -101,13 +101,12 @@ func SchemesFromJson(data io.Reader) []*Scheme { var schemes []*Scheme if err := json.NewDecoder(data).Decode(&schemes); err == nil { return schemes - } else { - return nil } + return nil } func (scheme *Scheme) IsValid() bool { - if len(scheme.Id) != 26 { + if !IsValidId(scheme.Id) { return false } @@ -160,15 +159,15 @@ func (scheme *Scheme) IsValidForCreate() bool { } if scheme.Scope == SCHEME_SCOPE_CHANNEL { - if len(scheme.DefaultTeamAdminRole) != 0 { + if scheme.DefaultTeamAdminRole != "" { return false } - if len(scheme.DefaultTeamUserRole) != 0 { + if scheme.DefaultTeamUserRole != "" { return false } - if len(scheme.DefaultTeamGuestRole) != 0 { + if scheme.DefaultTeamGuestRole != "" { return false } } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go new file mode 100644 index 0000000..3a5bf84 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go @@ -0,0 +1,397 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "net/http" + "regexp" + "strings" + "time" +) + +var searchTermPuncStart = regexp.MustCompile(`^[^\pL\d\s#"]+`) +var searchTermPuncEnd = regexp.MustCompile(`[^\pL\d\s*"]+$`) + +type SearchParams struct { + Terms string + ExcludedTerms string + IsHashtag bool + InChannels []string + ExcludedChannels []string + FromUsers []string + ExcludedUsers []string + AfterDate string + ExcludedAfterDate string + BeforeDate string + ExcludedBeforeDate string + Extensions []string + ExcludedExtensions []string + OnDate string + ExcludedDate string + OrTerms bool + IncludeDeletedChannels bool + TimeZoneOffset int + // True if this search doesn't originate from a "current user". 
+ SearchWithoutUserId bool +} + +// Returns the epoch timestamp of the start of the day specified by SearchParams.AfterDate +func (p *SearchParams) GetAfterDateMillis() int64 { + date, err := time.Parse("2006-01-02", PadDateStringZeros(p.AfterDate)) + if err != nil { + date = time.Now() + } + + // travel forward 1 day + oneDay := time.Hour * 24 + afterDate := date.Add(oneDay) + return GetStartOfDayMillis(afterDate, p.TimeZoneOffset) +} + +// Returns the epoch timestamp of the start of the day specified by SearchParams.ExcludedAfterDate +func (p *SearchParams) GetExcludedAfterDateMillis() int64 { + date, err := time.Parse("2006-01-02", PadDateStringZeros(p.ExcludedAfterDate)) + if err != nil { + date = time.Now() + } + + // travel forward 1 day + oneDay := time.Hour * 24 + afterDate := date.Add(oneDay) + return GetStartOfDayMillis(afterDate, p.TimeZoneOffset) +} + +// Returns the epoch timestamp of the end of the day specified by SearchParams.BeforeDate +func (p *SearchParams) GetBeforeDateMillis() int64 { + date, err := time.Parse("2006-01-02", PadDateStringZeros(p.BeforeDate)) + if err != nil { + return 0 + } + + // travel back 1 day + oneDay := time.Hour * -24 + beforeDate := date.Add(oneDay) + return GetEndOfDayMillis(beforeDate, p.TimeZoneOffset) +} + +// Returns the epoch timestamp of the end of the day specified by SearchParams.ExcludedBeforeDate +func (p *SearchParams) GetExcludedBeforeDateMillis() int64 { + date, err := time.Parse("2006-01-02", PadDateStringZeros(p.ExcludedBeforeDate)) + if err != nil { + return 0 + } + + // travel back 1 day + oneDay := time.Hour * -24 + beforeDate := date.Add(oneDay) + return GetEndOfDayMillis(beforeDate, p.TimeZoneOffset) +} + +// Returns the epoch timestamps of the start and end of the day specified by SearchParams.OnDate +func (p *SearchParams) GetOnDateMillis() (int64, int64) { + date, err := time.Parse("2006-01-02", PadDateStringZeros(p.OnDate)) + if err != nil { + return 0, 0 + } + + return GetStartOfDayMillis(date, p.TimeZoneOffset), GetEndOfDayMillis(date, p.TimeZoneOffset) +} + +// Returns the epoch timestamps of the start and end of the day specified by SearchParams.ExcludedDate +func (p *SearchParams) GetExcludedDateMillis() (int64, int64) { + date, err := time.Parse("2006-01-02", PadDateStringZeros(p.ExcludedDate)) + if err != nil { + return 0, 0 + } + + return GetStartOfDayMillis(date, p.TimeZoneOffset), GetEndOfDayMillis(date, p.TimeZoneOffset) +} + +var searchFlags = [...]string{"from", "channel", "in", "before", "after", "on", "ext"} + +type flag struct { + name string + value string + exclude bool +} + +type searchWord struct { + value string + exclude bool +} + +func splitWords(text string) []string { + words := []string{} + + foundQuote := false + location := 0 + for i, char := range text { + if char == '"' { + if foundQuote { + // Grab the quoted section + word := text[location : i+1] + words = append(words, word) + foundQuote = false + location = i + 1 + } else { + nextStart := i + if i > 0 && text[i-1] == '-' { + nextStart = i - 1 + } + words = append(words, strings.Fields(text[location:nextStart])...) + foundQuote = true + location = nextStart + } + } + } + + words = append(words, strings.Fields(text[location:])...) 
+ + return words +} + +func parseSearchFlags(input []string) ([]searchWord, []flag) { + words := []searchWord{} + flags := []flag{} + + skipNextWord := false + for i, word := range input { + if skipNextWord { + skipNextWord = false + continue + } + + isFlag := false + + if colon := strings.Index(word, ":"); colon != -1 { + var flagName string + var exclude bool + if strings.HasPrefix(word, "-") { + flagName = word[1:colon] + exclude = true + } else { + flagName = word[:colon] + exclude = false + } + + value := word[colon+1:] + + for _, searchFlag := range searchFlags { + // check for case insensitive equality + if strings.EqualFold(flagName, searchFlag) { + if value != "" { + flags = append(flags, flag{ + searchFlag, + value, + exclude, + }) + isFlag = true + } else if i < len(input)-1 { + flags = append(flags, flag{ + searchFlag, + input[i+1], + exclude, + }) + skipNextWord = true + isFlag = true + } + + if isFlag { + break + } + } + } + } + + if !isFlag { + exclude := false + if strings.HasPrefix(word, "-") { + exclude = true + } + // trim off surrounding punctuation (note that we leave trailing asterisks to allow wildcards) + word = searchTermPuncStart.ReplaceAllString(word, "") + word = searchTermPuncEnd.ReplaceAllString(word, "") + + // and remove extra pound #s + word = hashtagStart.ReplaceAllString(word, "#") + + if word != "" { + words = append(words, searchWord{ + word, + exclude, + }) + } + } + } + + return words, flags +} + +func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams { + words, flags := parseSearchFlags(splitWords(text)) + + hashtagTermList := []string{} + excludedHashtagTermList := []string{} + plainTermList := []string{} + excludedPlainTermList := []string{} + + for _, word := range words { + if validHashtag.MatchString(word.value) { + if word.exclude { + excludedHashtagTermList = append(excludedHashtagTermList, word.value) + } else { + hashtagTermList = append(hashtagTermList, word.value) + } + } else { + if word.exclude { + excludedPlainTermList = append(excludedPlainTermList, word.value) + } else { + plainTermList = append(plainTermList, word.value) + } + } + } + + hashtagTerms := strings.Join(hashtagTermList, " ") + excludedHashtagTerms := strings.Join(excludedHashtagTermList, " ") + plainTerms := strings.Join(plainTermList, " ") + excludedPlainTerms := strings.Join(excludedPlainTermList, " ") + + inChannels := []string{} + excludedChannels := []string{} + fromUsers := []string{} + excludedUsers := []string{} + afterDate := "" + excludedAfterDate := "" + beforeDate := "" + excludedBeforeDate := "" + onDate := "" + excludedDate := "" + excludedExtensions := []string{} + extensions := []string{} + + for _, flag := range flags { + if flag.name == "in" || flag.name == "channel" { + if flag.exclude { + excludedChannels = append(excludedChannels, flag.value) + } else { + inChannels = append(inChannels, flag.value) + } + } else if flag.name == "from" { + if flag.exclude { + excludedUsers = append(excludedUsers, flag.value) + } else { + fromUsers = append(fromUsers, flag.value) + } + } else if flag.name == "after" { + if flag.exclude { + excludedAfterDate = flag.value + } else { + afterDate = flag.value + } + } else if flag.name == "before" { + if flag.exclude { + excludedBeforeDate = flag.value + } else { + beforeDate = flag.value + } + } else if flag.name == "on" { + if flag.exclude { + excludedDate = flag.value + } else { + onDate = flag.value + } + } else if flag.name == "ext" { + if flag.exclude { + excludedExtensions = append(excludedExtensions, 
flag.value) + } else { + extensions = append(extensions, flag.value) + } + } + } + + paramsList := []*SearchParams{} + + if len(plainTerms) > 0 || len(excludedPlainTerms) > 0 { + paramsList = append(paramsList, &SearchParams{ + Terms: plainTerms, + ExcludedTerms: excludedPlainTerms, + IsHashtag: false, + InChannels: inChannels, + ExcludedChannels: excludedChannels, + FromUsers: fromUsers, + ExcludedUsers: excludedUsers, + AfterDate: afterDate, + ExcludedAfterDate: excludedAfterDate, + BeforeDate: beforeDate, + ExcludedBeforeDate: excludedBeforeDate, + Extensions: extensions, + ExcludedExtensions: excludedExtensions, + OnDate: onDate, + ExcludedDate: excludedDate, + TimeZoneOffset: timeZoneOffset, + }) + } + + if len(hashtagTerms) > 0 || len(excludedHashtagTerms) > 0 { + paramsList = append(paramsList, &SearchParams{ + Terms: hashtagTerms, + ExcludedTerms: excludedHashtagTerms, + IsHashtag: true, + InChannels: inChannels, + ExcludedChannels: excludedChannels, + FromUsers: fromUsers, + ExcludedUsers: excludedUsers, + AfterDate: afterDate, + ExcludedAfterDate: excludedAfterDate, + BeforeDate: beforeDate, + ExcludedBeforeDate: excludedBeforeDate, + Extensions: extensions, + ExcludedExtensions: excludedExtensions, + OnDate: onDate, + ExcludedDate: excludedDate, + TimeZoneOffset: timeZoneOffset, + }) + } + + // special case for when no terms are specified but we still have a filter + if len(plainTerms) == 0 && len(hashtagTerms) == 0 && + len(excludedPlainTerms) == 0 && len(excludedHashtagTerms) == 0 && + (len(inChannels) != 0 || len(fromUsers) != 0 || + len(excludedChannels) != 0 || len(excludedUsers) != 0 || + len(extensions) != 0 || len(excludedExtensions) != 0 || + afterDate != "" || excludedAfterDate != "" || + beforeDate != "" || excludedBeforeDate != "" || + onDate != "" || excludedDate != "") { + paramsList = append(paramsList, &SearchParams{ + Terms: "", + ExcludedTerms: "", + IsHashtag: false, + InChannels: inChannels, + ExcludedChannels: excludedChannels, + FromUsers: fromUsers, + ExcludedUsers: excludedUsers, + AfterDate: afterDate, + ExcludedAfterDate: excludedAfterDate, + BeforeDate: beforeDate, + ExcludedBeforeDate: excludedBeforeDate, + Extensions: extensions, + ExcludedExtensions: excludedExtensions, + OnDate: onDate, + ExcludedDate: excludedDate, + TimeZoneOffset: timeZoneOffset, + }) + } + + return paramsList +} + +func IsSearchParamsListValid(paramsList []*SearchParams) *AppError { + // All SearchParams should have same IncludeDeletedChannels value. + for _, params := range paramsList { + if params.IncludeDeletedChannels != paramsList[0].IncludeDeletedChannels { + return NewAppError("IsSearchParamsListValid", "model.search_params_list.is_valid.include_deleted_channels.app_error", nil, "", http.StatusInternalServerError) + } + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go b/vendor/github.com/mattermost/mattermost-server/v5/model/security_bulletin.go similarity index 71% rename from vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/security_bulletin.go index 958b9c9..66a6581 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/security_bulletin.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -15,8 +15,8 @@ type SecurityBulletin struct { type SecurityBulletins []SecurityBulletin -func (me *SecurityBulletin) ToJson() string { - b, _ := json.Marshal(me) +func (sb *SecurityBulletin) ToJson() string { + b, _ := json.Marshal(sb) return string(b) } @@ -26,12 +26,12 @@ func SecurityBulletinFromJson(data io.Reader) *SecurityBulletin { return o } -func (me SecurityBulletins) ToJson() string { - if b, err := json.Marshal(me); err != nil { +func (sb SecurityBulletins) ToJson() string { + b, err := json.Marshal(sb) + if err != nil { return "[]" - } else { - return string(b) } + return string(b) } func SecurityBulletinsFromJson(data io.Reader) SecurityBulletins { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go b/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go new file mode 100644 index 0000000..1f16f1c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go @@ -0,0 +1,1779 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Session) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 13 { + err = msgp.ArrayError{Wanted: 13, Got: zb0001} + return + } + z.Id, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.Token, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + z.CreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.ExpiresAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + z.LastActivityAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.UserId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.DeviceId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.IsOAuth, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + z.ExpiredNotify, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + if cap(z.TeamMembers) >= int(zb0003) { + z.TeamMembers = (z.TeamMembers)[:zb0003] + } else { + z.TeamMembers = make([]*TeamMember, zb0003) + } + for za0003 := range z.TeamMembers { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil 
{ + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + z.TeamMembers[za0003] = nil + } else { + if z.TeamMembers[za0003] == nil { + z.TeamMembers[za0003] = new(TeamMember) + } + err = z.TeamMembers[za0003].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + z.Local, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Session) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 13 + err = en.Append(0x9d) + if err != nil { + return + } + err = en.WriteString(z.Id) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + err = en.WriteString(z.Token) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + err = en.WriteInt64(z.CreateAt) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + err = en.WriteInt64(z.ExpiresAt) + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + err = en.WriteInt64(z.LastActivityAt) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + err = en.WriteString(z.UserId) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + err = en.WriteString(z.DeviceId) + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteBool(z.IsOAuth) + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + err = en.WriteBool(z.ExpiredNotify) + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + err = en.WriteMapHeader(uint32(len(z.Props))) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + for za0001, za0002 := range z.Props { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + } + err = en.WriteArrayHeader(uint32(len(z.TeamMembers))) + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.TeamMembers[za0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + err = en.WriteBool(z.Local) + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Session) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 13 + o = append(o, 0x9d) + o = msgp.AppendString(o, z.Id) + o = msgp.AppendString(o, z.Token) + o = msgp.AppendInt64(o, z.CreateAt) + o = msgp.AppendInt64(o, z.ExpiresAt) + o = msgp.AppendInt64(o, z.LastActivityAt) + o = msgp.AppendString(o, z.UserId) + o = msgp.AppendString(o, z.DeviceId) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendBool(o, z.IsOAuth) + o = msgp.AppendBool(o, z.ExpiredNotify) + o = msgp.AppendMapHeader(o, uint32(len(z.Props))) + for za0001, za0002 := range z.Props { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + o = msgp.AppendArrayHeader(o, uint32(len(z.TeamMembers))) + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TeamMembers[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, 
"TeamMembers", za0003) + return + } + } + } + o = msgp.AppendBool(o, z.Local) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Session) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 13 { + err = msgp.ArrayError{Wanted: 13, Got: zb0001} + return + } + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.Token, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.ExpiresAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExpiresAt") + return + } + z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.UserId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + z.DeviceId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeviceId") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.IsOAuth, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsOAuth") + return + } + z.ExpiredNotify, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExpiredNotify") + return + } + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TeamMembers") + return + } + if cap(z.TeamMembers) >= int(zb0003) { + z.TeamMembers = (z.TeamMembers)[:zb0003] + } else { + z.TeamMembers = make([]*TeamMember, zb0003) + } + for za0003 := range z.TeamMembers { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TeamMembers[za0003] = nil + } else { + if z.TeamMembers[za0003] == nil { + z.TeamMembers[za0003] = new(TeamMember) + } + bts, err = z.TeamMembers[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TeamMembers", za0003) + return + } + } + } + z.Local, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Local") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Session) Msgsize() (s int) { + s = 1 + msgp.StringPrefixSize + len(z.Id) + msgp.StringPrefixSize + len(z.Token) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.UserId) + msgp.StringPrefixSize + len(z.DeviceId) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.BoolSize + msgp.MapHeaderSize + if z.Props != nil { + for 
za0001, za0002 := range z.Props { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += msgp.ArrayHeaderSize + for za0003 := range z.TeamMembers { + if z.TeamMembers[za0003] == nil { + s += msgp.NilSize + } else { + s += z.TeamMembers[za0003].Msgsize() + } + } + s += msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StringMap) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(StringMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + zb0003-- + var zb0001 string + var zb0002 string + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + zb0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = zb0002 + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z StringMap) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteMapHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0004, zb0005 := range z { + err = en.WriteString(zb0004) + if err != nil { + err = msgp.WrapError(err) + return + } + err = en.WriteString(zb0005) + if err != nil { + err = msgp.WrapError(err, zb0004) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z StringMap) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendMapHeader(o, uint32(len(z))) + for zb0004, zb0005 := range z { + o = msgp.AppendString(o, zb0004) + o = msgp.AppendString(o, zb0005) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StringMap) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(StringMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + var zb0001 string + var zb0002 string + zb0003-- + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + zb0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + (*z)[zb0001] = zb0002 + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z StringMap) Msgsize() (s int) { + s = msgp.MapHeaderSize + if z != nil { + for zb0004, zb0005 := range z { + _ = zb0005 + s += msgp.StringPrefixSize + len(zb0004) + msgp.StringPrefixSize + len(zb0005) + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *TeamMember) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "TeamId": + z.TeamId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + case "UserId": + z.UserId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + case "Roles": + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") 
+ return + } + case "DeleteAt": + z.DeleteAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + case "SchemeGuest": + z.SchemeGuest, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + case "SchemeUser": + z.SchemeUser, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + case "SchemeAdmin": + z.SchemeAdmin, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + case "ExplicitRoles": + z.ExplicitRoles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *TeamMember) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 8 + // write "TeamId" + err = en.Append(0x88, 0xa6, 0x54, 0x65, 0x61, 0x6d, 0x49, 0x64) + if err != nil { + return + } + err = en.WriteString(z.TeamId) + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + // write "UserId" + err = en.Append(0xa6, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64) + if err != nil { + return + } + err = en.WriteString(z.UserId) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + // write "Roles" + err = en.Append(0xa5, 0x52, 0x6f, 0x6c, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + // write "DeleteAt" + err = en.Append(0xa8, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteInt64(z.DeleteAt) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + // write "SchemeGuest" + err = en.Append(0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x47, 0x75, 0x65, 0x73, 0x74) + if err != nil { + return + } + err = en.WriteBool(z.SchemeGuest) + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + // write "SchemeUser" + err = en.Append(0xaa, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteBool(z.SchemeUser) + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + // write "SchemeAdmin" + err = en.Append(0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e) + if err != nil { + return + } + err = en.WriteBool(z.SchemeAdmin) + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + // write "ExplicitRoles" + err = en.Append(0xad, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteString(z.ExplicitRoles) + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TeamMember) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "TeamId" + o = append(o, 0x88, 0xa6, 0x54, 0x65, 0x61, 0x6d, 0x49, 0x64) + o = msgp.AppendString(o, z.TeamId) + // string "UserId" + o = append(o, 0xa6, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64) + o = msgp.AppendString(o, z.UserId) + // string "Roles" + o = append(o, 0xa5, 0x52, 0x6f, 0x6c, 0x65, 0x73) + o = msgp.AppendString(o, z.Roles) + // string "DeleteAt" + o = append(o, 0xa8, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74) + o = msgp.AppendInt64(o, z.DeleteAt) + // string "SchemeGuest" + o = append(o, 0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 
0x47, 0x75, 0x65, 0x73, 0x74) + o = msgp.AppendBool(o, z.SchemeGuest) + // string "SchemeUser" + o = append(o, 0xaa, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72) + o = msgp.AppendBool(o, z.SchemeUser) + // string "SchemeAdmin" + o = append(o, 0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e) + o = msgp.AppendBool(o, z.SchemeAdmin) + // string "ExplicitRoles" + o = append(o, 0xad, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73) + o = msgp.AppendString(o, z.ExplicitRoles) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TeamMember) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "TeamId": + z.TeamId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TeamId") + return + } + case "UserId": + z.UserId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UserId") + return + } + case "Roles": + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + case "DeleteAt": + z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + case "SchemeGuest": + z.SchemeGuest, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeGuest") + return + } + case "SchemeUser": + z.SchemeUser, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeUser") + return + } + case "SchemeAdmin": + z.SchemeAdmin, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SchemeAdmin") + return + } + case "ExplicitRoles": + z.ExplicitRoles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExplicitRoles") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TeamMember) Msgsize() (s int) { + s = 1 + 7 + msgp.StringPrefixSize + len(z.TeamId) + 7 + msgp.StringPrefixSize + len(z.UserId) + 6 + msgp.StringPrefixSize + len(z.Roles) + 9 + msgp.Int64Size + 12 + msgp.BoolSize + 11 + msgp.BoolSize + 12 + msgp.BoolSize + 14 + msgp.StringPrefixSize + len(z.ExplicitRoles) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *User) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 31 { + err = msgp.ArrayError{Wanted: 31, Got: zb0001} + return + } + z.Id, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.CreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.UpdateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + z.DeleteAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.Username, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + z.Password, err = dc.ReadString() + 
if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + z.AuthData = nil + } else { + if z.AuthData == nil { + z.AuthData = new(string) + } + *z.AuthData, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + z.AuthService, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + z.Email, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + z.EmailVerified, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + z.Nickname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + z.FirstName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + z.LastName, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + z.Position, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + z.Roles, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.AllowMarketing, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + if z.NotifyProps == nil { + z.NotifyProps = make(StringMap, zb0003) + } else if len(z.NotifyProps) > 0 { + for key := range z.NotifyProps { + delete(z.NotifyProps, key) + } + } + for zb0003 > 0 { + zb0003-- + var za0003 string + var za0004 string + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + za0004, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "NotifyProps", za0003) + return + } + z.NotifyProps[za0003] = za0004 + } + z.LastPasswordUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + z.LastPictureUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + z.FailedAttempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + z.Locale, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + if z.Timezone == nil { + z.Timezone = make(StringMap, zb0004) + } else if len(z.Timezone) > 0 { + for key := range z.Timezone { + delete(z.Timezone, key) + } + } + for zb0004 > 0 { + zb0004-- + var za0005 string + var za0006 string + za0005, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + za0006, err = dc.ReadString() + if err != nil { + 
err = msgp.WrapError(err, "Timezone", za0005) + return + } + z.Timezone[za0005] = za0006 + } + z.MfaActive, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + z.MfaSecret, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + z.LastActivityAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.IsBot, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + z.BotDescription, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + z.BotLastIconUpdate, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + z.TermsOfServiceId, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + z.TermsOfServiceCreateAt, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *User) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 31 + err = en.Append(0xdc, 0x0, 0x1f) + if err != nil { + return + } + err = en.WriteString(z.Id) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + err = en.WriteInt64(z.CreateAt) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + err = en.WriteInt64(z.UpdateAt) + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + err = en.WriteInt64(z.DeleteAt) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + err = en.WriteString(z.Username) + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + err = en.WriteString(z.Password) + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if z.AuthData == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteString(*z.AuthData) + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + err = en.WriteString(z.AuthService) + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + err = en.WriteString(z.Email) + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + err = en.WriteBool(z.EmailVerified) + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + err = en.WriteString(z.Nickname) + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + err = en.WriteString(z.FirstName) + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + err = en.WriteString(z.LastName) + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + err = en.WriteString(z.Position) + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + err = en.WriteString(z.Roles) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + err = en.WriteBool(z.AllowMarketing) + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + err = en.WriteMapHeader(uint32(len(z.Props))) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + for za0001, za0002 := range z.Props { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + } + err = en.WriteMapHeader(uint32(len(z.NotifyProps))) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + for za0003, za0004 := range 
z.NotifyProps { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + err = en.WriteString(za0004) + if err != nil { + err = msgp.WrapError(err, "NotifyProps", za0003) + return + } + } + err = en.WriteInt64(z.LastPasswordUpdate) + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + err = en.WriteInt64(z.LastPictureUpdate) + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + err = en.WriteInt(z.FailedAttempts) + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + err = en.WriteString(z.Locale) + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + err = en.WriteMapHeader(uint32(len(z.Timezone))) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + for za0005, za0006 := range z.Timezone { + err = en.WriteString(za0005) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + err = en.WriteString(za0006) + if err != nil { + err = msgp.WrapError(err, "Timezone", za0005) + return + } + } + err = en.WriteBool(z.MfaActive) + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + err = en.WriteString(z.MfaSecret) + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + err = en.WriteInt64(z.LastActivityAt) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + err = en.WriteBool(z.IsBot) + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + err = en.WriteString(z.BotDescription) + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + err = en.WriteInt64(z.BotLastIconUpdate) + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + err = en.WriteString(z.TermsOfServiceId) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + err = en.WriteInt64(z.TermsOfServiceCreateAt) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *User) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 31 + o = append(o, 0xdc, 0x0, 0x1f) + o = msgp.AppendString(o, z.Id) + o = msgp.AppendInt64(o, z.CreateAt) + o = msgp.AppendInt64(o, z.UpdateAt) + o = msgp.AppendInt64(o, z.DeleteAt) + o = msgp.AppendString(o, z.Username) + o = msgp.AppendString(o, z.Password) + if z.AuthData == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendString(o, *z.AuthData) + } + o = msgp.AppendString(o, z.AuthService) + o = msgp.AppendString(o, z.Email) + o = msgp.AppendBool(o, z.EmailVerified) + o = msgp.AppendString(o, z.Nickname) + o = msgp.AppendString(o, z.FirstName) + o = msgp.AppendString(o, z.LastName) + o = msgp.AppendString(o, z.Position) + o = msgp.AppendString(o, z.Roles) + o = msgp.AppendBool(o, z.AllowMarketing) + o = msgp.AppendMapHeader(o, uint32(len(z.Props))) + for za0001, za0002 := range z.Props { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + o = msgp.AppendMapHeader(o, uint32(len(z.NotifyProps))) + for za0003, za0004 := range z.NotifyProps { + o = msgp.AppendString(o, za0003) + o = msgp.AppendString(o, za0004) + } + o = msgp.AppendInt64(o, z.LastPasswordUpdate) + o = msgp.AppendInt64(o, z.LastPictureUpdate) + o = msgp.AppendInt(o, z.FailedAttempts) + o = msgp.AppendString(o, z.Locale) + o = msgp.AppendMapHeader(o, uint32(len(z.Timezone))) + for za0005, za0006 := range z.Timezone { + o = msgp.AppendString(o, 
za0005) + o = msgp.AppendString(o, za0006) + } + o = msgp.AppendBool(o, z.MfaActive) + o = msgp.AppendString(o, z.MfaSecret) + o = msgp.AppendInt64(o, z.LastActivityAt) + o = msgp.AppendBool(o, z.IsBot) + o = msgp.AppendString(o, z.BotDescription) + o = msgp.AppendInt64(o, z.BotLastIconUpdate) + o = msgp.AppendString(o, z.TermsOfServiceId) + o = msgp.AppendInt64(o, z.TermsOfServiceCreateAt) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *User) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 31 { + err = msgp.ArrayError{Wanted: 31, Got: zb0001} + return + } + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreateAt") + return + } + z.UpdateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "UpdateAt") + return + } + z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteAt") + return + } + z.Username, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Username") + return + } + z.Password, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Password") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.AuthData = nil + } else { + if z.AuthData == nil { + z.AuthData = new(string) + } + *z.AuthData, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AuthData") + return + } + } + z.AuthService, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AuthService") + return + } + z.Email, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Email") + return + } + z.EmailVerified, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "EmailVerified") + return + } + z.Nickname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Nickname") + return + } + z.FirstName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FirstName") + return + } + z.LastName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastName") + return + } + z.Position, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Position") + return + } + z.Roles, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roles") + return + } + z.AllowMarketing, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllowMarketing") + return + } + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + if z.Props == nil { + z.Props = make(StringMap, zb0002) + } else if len(z.Props) > 0 { + for key := range z.Props { + delete(z.Props, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Props", za0001) + return + } + z.Props[za0001] = za0002 + } + var zb0003 uint32 + zb0003, bts, err = 
msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + if z.NotifyProps == nil { + z.NotifyProps = make(StringMap, zb0003) + } else if len(z.NotifyProps) > 0 { + for key := range z.NotifyProps { + delete(z.NotifyProps, key) + } + } + for zb0003 > 0 { + var za0003 string + var za0004 string + zb0003-- + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps") + return + } + za0004, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotifyProps", za0003) + return + } + z.NotifyProps[za0003] = za0004 + } + z.LastPasswordUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastPasswordUpdate") + return + } + z.LastPictureUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastPictureUpdate") + return + } + z.FailedAttempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FailedAttempts") + return + } + z.Locale, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Locale") + return + } + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + if z.Timezone == nil { + z.Timezone = make(StringMap, zb0004) + } else if len(z.Timezone) > 0 { + for key := range z.Timezone { + delete(z.Timezone, key) + } + } + for zb0004 > 0 { + var za0005 string + var za0006 string + zb0004-- + za0005, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone") + return + } + za0006, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timezone", za0005) + return + } + z.Timezone[za0005] = za0006 + } + z.MfaActive, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MfaActive") + return + } + z.MfaSecret, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MfaSecret") + return + } + z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastActivityAt") + return + } + z.IsBot, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsBot") + return + } + z.BotDescription, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BotDescription") + return + } + z.BotLastIconUpdate, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BotLastIconUpdate") + return + } + z.TermsOfServiceId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceId") + return + } + z.TermsOfServiceCreateAt, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TermsOfServiceCreateAt") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *User) Msgsize() (s int) { + s = 3 + msgp.StringPrefixSize + len(z.Id) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.Username) + msgp.StringPrefixSize + len(z.Password) + if z.AuthData == nil { + s += msgp.NilSize + } else { + s += msgp.StringPrefixSize + len(*z.AuthData) + } + s += msgp.StringPrefixSize + len(z.AuthService) + msgp.StringPrefixSize + len(z.Email) + msgp.BoolSize + msgp.StringPrefixSize + len(z.Nickname) + msgp.StringPrefixSize + len(z.FirstName) + msgp.StringPrefixSize + len(z.LastName) 
+ msgp.StringPrefixSize + len(z.Position) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.MapHeaderSize + if z.Props != nil { + for za0001, za0002 := range z.Props { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += msgp.MapHeaderSize + if z.NotifyProps != nil { + for za0003, za0004 := range z.NotifyProps { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004) + } + } + s += msgp.Int64Size + msgp.Int64Size + msgp.IntSize + msgp.StringPrefixSize + len(z.Locale) + msgp.MapHeaderSize + if z.Timezone != nil { + for za0005, za0006 := range z.Timezone { + _ = za0006 + s += msgp.StringPrefixSize + len(za0005) + msgp.StringPrefixSize + len(za0006) + } + } + s += msgp.BoolSize + msgp.StringPrefixSize + len(z.MfaSecret) + msgp.Int64Size + msgp.BoolSize + msgp.StringPrefixSize + len(z.BotDescription) + msgp.Int64Size + msgp.StringPrefixSize + len(z.TermsOfServiceId) + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *UserMap) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(UserMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + zb0003-- + var zb0001 string + var zb0002 *User + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + zb0002 = nil + } else { + if zb0002 == nil { + zb0002 = new(User) + } + err = zb0002.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + (*z)[zb0001] = zb0002 + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z UserMap) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteMapHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0004, zb0005 := range z { + err = en.WriteString(zb0004) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0005 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = zb0005.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, zb0004) + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z UserMap) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendMapHeader(o, uint32(len(z))) + for zb0004, zb0005 := range z { + o = msgp.AppendString(o, zb0004) + if zb0005 == nil { + o = msgp.AppendNil(o) + } else { + o, err = zb0005.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, zb0004) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *UserMap) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if (*z) == nil { + (*z) = make(UserMap, zb0003) + } else if len((*z)) > 0 { + for key := range *z { + delete((*z), key) + } + } + for zb0003 > 0 { + var zb0001 string + var zb0002 *User + zb0003-- + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + zb0002 = nil + } else { + if zb0002 == nil { + zb0002 = new(User) + } + bts, err = zb0002.UnmarshalMsg(bts) + if err != 
nil { + err = msgp.WrapError(err, zb0001) + return + } + } + (*z)[zb0001] = zb0002 + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z UserMap) Msgsize() (s int) { + s = msgp.MapHeaderSize + if z != nil { + for zb0004, zb0005 := range z { + _ = zb0005 + s += msgp.StringPrefixSize + len(zb0004) + if zb0005 == nil { + s += msgp.NilSize + } else { + s += zb0005.Msgsize() + } + } + } + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go new file mode 100644 index 0000000..0aa6fc1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go @@ -0,0 +1,231 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "strconv" + "strings" + + "github.com/mattermost/mattermost-server/v5/mlog" +) + +const ( + SESSION_COOKIE_TOKEN = "MMAUTHTOKEN" + SESSION_COOKIE_USER = "MMUSERID" + SESSION_COOKIE_CSRF = "MMCSRF" + SESSION_CACHE_SIZE = 35000 + SESSION_PROP_PLATFORM = "platform" + SESSION_PROP_OS = "os" + SESSION_PROP_BROWSER = "browser" + SESSION_PROP_TYPE = "type" + SESSION_PROP_USER_ACCESS_TOKEN_ID = "user_access_token_id" + SESSION_PROP_IS_BOT = "is_bot" + SESSION_PROP_IS_BOT_VALUE = "true" + SESSION_TYPE_USER_ACCESS_TOKEN = "UserAccessToken" + SESSION_TYPE_CLOUD_KEY = "CloudKey" + SESSION_PROP_IS_GUEST = "is_guest" + SESSION_ACTIVITY_TIMEOUT = 1000 * 60 * 5 // 5 minutes + SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years +) + +//msgp:tuple Session + +// Session contains the user session details. +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. +type Session struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + ExpiresAt int64 `json:"expires_at"` + LastActivityAt int64 `json:"last_activity_at"` + UserId string `json:"user_id"` + DeviceId string `json:"device_id"` + Roles string `json:"roles"` + IsOAuth bool `json:"is_oauth"` + ExpiredNotify bool `json:"expired_notify"` + Props StringMap `json:"props"` + TeamMembers []*TeamMember `json:"team_members" db:"-"` + Local bool `json:"local" db:"-"` +} + +// Returns true if the session is unrestricted, which should grant it +// with all permissions. 
This is used for local mode sessions +func (s *Session) IsUnrestricted() bool { + return s.Local +} + +func (s *Session) DeepCopy() *Session { + copySession := *s + + if s.Props != nil { + copySession.Props = CopyStringMap(s.Props) + } + + if s.TeamMembers != nil { + copySession.TeamMembers = make([]*TeamMember, len(s.TeamMembers)) + for index, tm := range s.TeamMembers { + copySession.TeamMembers[index] = new(TeamMember) + *copySession.TeamMembers[index] = *tm + } + } + + return ©Session +} + +func (s *Session) ToJson() string { + b, _ := json.Marshal(s) + return string(b) +} + +func SessionFromJson(data io.Reader) *Session { + var s *Session + json.NewDecoder(data).Decode(&s) + return s +} + +func (s *Session) PreSave() { + if s.Id == "" { + s.Id = NewId() + } + + if s.Token == "" { + s.Token = NewId() + } + + s.CreateAt = GetMillis() + s.LastActivityAt = s.CreateAt + + if s.Props == nil { + s.Props = make(map[string]string) + } +} + +func (s *Session) Sanitize() { + s.Token = "" +} + +func (s *Session) IsExpired() bool { + + if s.ExpiresAt <= 0 { + return false + } + + if GetMillis() > s.ExpiresAt { + return true + } + + return false +} + +// Deprecated: SetExpireInDays is deprecated and should not be used. +// Use (*App).SetSessionExpireInDays instead which handles the +// cases where the new ExpiresAt is not relative to CreateAt. +func (s *Session) SetExpireInDays(days int) { + if s.CreateAt == 0 { + s.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days)) + } else { + s.ExpiresAt = s.CreateAt + (1000 * 60 * 60 * 24 * int64(days)) + } +} + +func (s *Session) AddProp(key string, value string) { + + if s.Props == nil { + s.Props = make(map[string]string) + } + + s.Props[key] = value +} + +func (s *Session) GetTeamByTeamId(teamId string) *TeamMember { + for _, team := range s.TeamMembers { + if team.TeamId == teamId { + return team + } + } + + return nil +} + +func (s *Session) IsMobileApp() bool { + return len(s.DeviceId) > 0 || s.IsMobile() +} + +func (s *Session) IsMobile() bool { + val, ok := s.Props[USER_AUTH_SERVICE_IS_MOBILE] + if !ok { + return false + } + isMobile, err := strconv.ParseBool(val) + if err != nil { + mlog.Debug("Error parsing boolean property from Session", mlog.Err(err)) + return false + } + return isMobile +} + +func (s *Session) IsSaml() bool { + val, ok := s.Props[USER_AUTH_SERVICE_IS_SAML] + if !ok { + return false + } + isSaml, err := strconv.ParseBool(val) + if err != nil { + mlog.Debug("Error parsing boolean property from Session", mlog.Err(err)) + return false + } + return isSaml +} + +func (s *Session) IsOAuthUser() bool { + val, ok := s.Props[USER_AUTH_SERVICE_IS_OAUTH] + if !ok { + return false + } + isOAuthUser, err := strconv.ParseBool(val) + if err != nil { + mlog.Debug("Error parsing boolean property from Session", mlog.Err(err)) + return false + } + return isOAuthUser +} + +func (s *Session) IsSSOLogin() bool { + return s.IsOAuthUser() || s.IsSaml() +} + +func (s *Session) GetUserRoles() []string { + return strings.Fields(s.Roles) +} + +func (s *Session) GenerateCSRF() string { + token := NewId() + s.AddProp("csrf", token) + return token +} + +func (s *Session) GetCSRF() string { + if s.Props == nil { + return "" + } + + return s.Props["csrf"] +} + +func SessionsToJson(o []*Session) string { + b, err := json.Marshal(o) + if err != nil { + return "[]" + } + return string(b) +} + +func SessionsFromJson(data io.Reader) []*Session { + var o []*Session + json.NewDecoder(data).Decode(&o) + return o +} diff --git 
a/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go b/vendor/github.com/mattermost/mattermost-server/v5/model/slack_attachment.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/slack_attachment.go index 0f1b96f..a85c6be 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/slack_attachment.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -111,11 +111,7 @@ func (s *SlackAttachment) Equals(input *SlackAttachment) bool { } } - if s.Timestamp != input.Timestamp { - return false - } - - return true + return s.Timestamp == input.Timestamp } type SlackAttachmentField struct { diff --git a/vendor/github.com/mattermost/mattermost-server/model/slack_compatibility.go b/vendor/github.com/mattermost/mattermost-server/v5/model/slack_compatibility.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/model/slack_compatibility.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/slack_compatibility.go index 2553906..2d3e287 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/slack_compatibility.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/slack_compatibility.go @@ -1,5 +1,5 @@ -// Copyright (c) 2019-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/status.go b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go similarity index 73% rename from vendor/github.com/mattermost/mattermost-server/model/status.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/status.go index 7888c60..1f32422 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/status.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -28,15 +28,15 @@ type Status struct { } func (o *Status) ToJson() string { - tempChannelId := o.ActiveChannel - o.ActiveChannel = "" - b, _ := json.Marshal(o) - o.ActiveChannel = tempChannelId + oCopy := *o + oCopy.ActiveChannel = "" + b, _ := json.Marshal(oCopy) return string(b) } func (o *Status) ToClusterJson() string { - b, _ := json.Marshal(o) + oCopy := *o + b, _ := json.Marshal(oCopy) return string(b) } @@ -47,18 +47,14 @@ func StatusFromJson(data io.Reader) *Status { } func StatusListToJson(u []*Status) string { - activeChannels := make([]string, len(u)) - for index, s := range u { - activeChannels[index] = s.ActiveChannel - s.ActiveChannel = "" - } - - b, _ := json.Marshal(u) - - for index, s := range u { - s.ActiveChannel = activeChannels[index] + uCopy := make([]Status, len(u)) + for i, s := range u { + sCopy := *s + sCopy.ActiveChannel = "" + uCopy[i] = sCopy } + b, _ := json.Marshal(uCopy) return string(b) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go b/vendor/github.com/mattermost/mattermost-server/v5/model/suggest_command.go similarity index 91% rename from vendor/github.com/mattermost/mattermost-server/model/suggest_command.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/suggest_command.go index 44f46bf..45a7af3 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/suggest_command.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/switch_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/switch_request.go similarity index 81% rename from vendor/github.com/mattermost/mattermost-server/model/switch_request.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/switch_request.go index 2a522f4..bdb9004 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/switch_request.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/switch_request.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -34,14 +34,16 @@ func (o *SwitchRequest) EmailToOAuth() bool { (o.NewService == USER_AUTH_SERVICE_SAML || o.NewService == USER_AUTH_SERVICE_GITLAB || o.NewService == SERVICE_GOOGLE || - o.NewService == SERVICE_OFFICE365) + o.NewService == SERVICE_OFFICE365 || + o.NewService == SERVICE_OPENID) } func (o *SwitchRequest) OAuthToEmail() bool { return (o.CurrentService == USER_AUTH_SERVICE_SAML || o.CurrentService == USER_AUTH_SERVICE_GITLAB || o.CurrentService == SERVICE_GOOGLE || - o.CurrentService == SERVICE_OFFICE365) && o.NewService == USER_AUTH_SERVICE_EMAIL + o.CurrentService == SERVICE_OFFICE365 || + o.CurrentService == SERVICE_OPENID) && o.NewService == USER_AUTH_SERVICE_EMAIL } func (o *SwitchRequest) EmailToLdap() bool { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go new file mode 100644 index 0000000..2b54dbc --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go @@ -0,0 +1,209 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "math/big" +) + +const ( + SYSTEM_TELEMETRY_ID = "DiagnosticId" + SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" + SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" + SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" + SYSTEM_LICENSE_RENEWAL_TOKEN = "LicenseRenewalToken" + SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" + SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey" + SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret" + SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate" + SYSTEM_FIRST_SERVER_RUN_TIMESTAMP_KEY = "FirstServerRunTimestamp" + SYSTEM_CLUSTER_ENCRYPTION_KEY = "ClusterEncryptionKey" + SYSTEM_UPGRADED_FROM_TE_ID = "UpgradedFromTE" + SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5 = "warn_metric_number_of_teams_5" + SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50 = "warn_metric_number_of_channels_50" + SYSTEM_WARN_METRIC_MFA = "warn_metric_mfa" + SYSTEM_WARN_METRIC_EMAIL_DOMAIN = "warn_metric_email_domain" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100 = "warn_metric_number_of_active_users_100" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200 = "warn_metric_number_of_active_users_200" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300 = "warn_metric_number_of_active_users_300" + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500 = "warn_metric_number_of_active_users_500" + SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M = "warn_metric_number_of_posts_2M" + SYSTEM_WARN_METRIC_LAST_RUN_TIMESTAMP_KEY = "LastWarnMetricRunTimestamp" + AWS_METERING_REPORT_INTERVAL = 1 + AWS_METERING_DIMENSION_USAGE_HRS = "UsageHrs" + USER_LIMIT_OVERAGE_CYCLE_END_DATE = "UserLimitOverageCycleEndDate" + OVER_USER_LIMIT_FORGIVEN_COUNT = "OverUserLimitForgivenCount" + OVER_USER_LIMIT_LAST_EMAIL_SENT = "OverUserLimitLastEmailSent" +) + +const ( + WARN_METRIC_STATUS_LIMIT_REACHED = "true" + WARN_METRIC_STATUS_RUNONCE = "runonce" + WARN_METRIC_STATUS_ACK = "ack" + WARN_METRIC_STATUS_STORE_PREFIX = "warn_metric_" + WARN_METRIC_JOB_INTERVAL = 24 * 7 + WARN_METRIC_NUMBER_OF_ACTIVE_USERS_25 = 25 + WARN_METRIC_JOB_WAIT_TIME = 1000 * 3600 * 24 * 7 // 7 days +) + +type System struct { + Name string `json:"name"` + Value string `json:"value"` +} + +func (o *System) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func SystemFromJson(data io.Reader) *System { + var o *System + json.NewDecoder(data).Decode(&o) + return o +} + +type 
SystemPostActionCookieSecret struct { + Secret []byte `json:"key,omitempty"` +} + +type SystemAsymmetricSigningKey struct { + ECDSAKey *SystemECDSAKey `json:"ecdsa_key,omitempty"` +} + +type SystemECDSAKey struct { + Curve string `json:"curve"` + X *big.Int `json:"x"` + Y *big.Int `json:"y"` + D *big.Int `json:"d,omitempty"` +} + +// ServerBusyState provides serialization for app.Busy. +type ServerBusyState struct { + Busy bool `json:"busy"` + Expires int64 `json:"expires"` + Expires_ts string `json:"expires_ts,omitempty"` +} + +func (sbs *ServerBusyState) ToJson() string { + b, _ := json.Marshal(sbs) + return string(b) +} + +func ServerBusyStateFromJson(r io.Reader) *ServerBusyState { + var sbs *ServerBusyState + json.NewDecoder(r).Decode(&sbs) + return sbs +} + +var WarnMetricsTable = map[string]WarnMetric{ + SYSTEM_WARN_METRIC_MFA: { + Id: SYSTEM_WARN_METRIC_MFA, + Limit: -1, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_EMAIL_DOMAIN: { + Id: SYSTEM_WARN_METRIC_EMAIL_DOMAIN, + Limit: -1, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5, + Limit: 5, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50, + Limit: 50, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100, + Limit: 100, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200, + Limit: 200, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300, + Limit: 300, + IsBotOnly: true, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500, + Limit: 500, + IsBotOnly: false, + IsRunOnce: true, + }, + SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M: { + Id: SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M, + Limit: 2000000, + IsBotOnly: false, + IsRunOnce: true, + }, +} + +type WarnMetric struct { + Id string + Limit int64 + IsBotOnly bool + IsRunOnce bool +} + +type WarnMetricDisplayTexts struct { + BotTitle string + BotMessageBody string + BotSuccessMessage string + EmailBody string +} +type WarnMetricStatus struct { + Id string `json:"id"` + Limit int64 `json:"limit"` + Acked bool `json:"acked"` + StoreStatus string `json:"store_status,omitempty"` +} + +func (wms *WarnMetricStatus) ToJson() string { + b, _ := json.Marshal(wms) + return string(b) +} + +func WarnMetricStatusFromJson(data io.Reader) *WarnMetricStatus { + var o WarnMetricStatus + if err := json.NewDecoder(data).Decode(&o); err != nil { + return nil + } + return &o +} + +func MapWarnMetricStatusToJson(o map[string]*WarnMetricStatus) string { + b, _ := json.Marshal(o) + return string(b) +} + +type SendWarnMetricAck struct { + ForceAck bool `json:"forceAck"` +} + +func (swma *SendWarnMetricAck) ToJson() string { + b, _ := json.Marshal(swma) + return string(b) +} + +func SendWarnMetricAckFromJson(r io.Reader) *SendWarnMetricAck { + var swma *SendWarnMetricAck + json.NewDecoder(r).Decode(&swma) + return swma +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team.go similarity index 89% rename from vendor/github.com/mattermost/mattermost-server/model/team.go rename to 
vendor/github.com/mattermost/mattermost-server/v5/model/team.go index b727db9..381eb8b 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/team.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/team.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -136,7 +136,7 @@ func (o *Team) Etag() string { func (o *Team) IsValid() *AppError { - if len(o.Id) != 26 { + if !IsValidId(o.Id) { return NewAppError("Team.IsValid", "model.team.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -203,6 +203,11 @@ func (o *Team) PreSave() { o.CreateAt = GetMillis() o.UpdateAt = o.CreateAt + o.Name = SanitizeUnicode(o.Name) + o.DisplayName = SanitizeUnicode(o.DisplayName) + o.Description = SanitizeUnicode(o.Description) + o.CompanyName = SanitizeUnicode(o.CompanyName) + if len(o.InviteId) == 0 { o.InviteId = NewId() } @@ -210,6 +215,10 @@ func (o *Team) PreSave() { func (o *Team) PreUpdate() { o.UpdateAt = GetMillis() + o.Name = SanitizeUnicode(o.Name) + o.DisplayName = SanitizeUnicode(o.DisplayName) + o.Description = SanitizeUnicode(o.Description) + o.CompanyName = SanitizeUnicode(o.CompanyName) } func IsReservedTeamName(s string) bool { @@ -225,7 +234,6 @@ func IsReservedTeamName(s string) bool { } func IsValidTeamName(s string) bool { - if !IsValidAlphaNum(s) { return false } @@ -268,36 +276,37 @@ func CleanTeamName(s string) string { func (o *Team) Sanitize() { o.Email = "" + o.InviteId = "" } -func (t *Team) Patch(patch *TeamPatch) { +func (o *Team) Patch(patch *TeamPatch) { if patch.DisplayName != nil { - t.DisplayName = *patch.DisplayName + o.DisplayName = *patch.DisplayName } if patch.Description != nil { - t.Description = *patch.Description + o.Description = *patch.Description } if patch.CompanyName != nil { - t.CompanyName = *patch.CompanyName + o.CompanyName = *patch.CompanyName } if patch.AllowedDomains != nil { - t.AllowedDomains = *patch.AllowedDomains + o.AllowedDomains = *patch.AllowedDomains } if patch.AllowOpenInvite != nil { - t.AllowOpenInvite = *patch.AllowOpenInvite + o.AllowOpenInvite = *patch.AllowOpenInvite } if patch.GroupConstrained != nil { - t.GroupConstrained = patch.GroupConstrained + o.GroupConstrained = patch.GroupConstrained } } -func (t *Team) IsGroupConstrained() bool { - return t.GroupConstrained != nil && *t.GroupConstrained +func (o *Team) IsGroupConstrained() bool { + return o.GroupConstrained != nil && *o.GroupConstrained } func (t *TeamPatch) ToJson() string { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go new file mode 100644 index 0000000..26ed8d4 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/team_member.go @@ -0,0 +1,186 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" +) + +const ( + USERNAME = "Username" +) + +type TeamMember struct { + TeamId string `json:"team_id"` + UserId string `json:"user_id"` + Roles string `json:"roles"` + DeleteAt int64 `json:"delete_at"` + SchemeGuest bool `json:"scheme_guest"` + SchemeUser bool `json:"scheme_user"` + SchemeAdmin bool `json:"scheme_admin"` + ExplicitRoles string `json:"explicit_roles"` +} + +type TeamUnread struct { + TeamId string `json:"team_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` +} + +type TeamMemberForExport struct { + TeamMember + TeamName string +} + +type TeamMemberWithError struct { + UserId string `json:"user_id"` + Member *TeamMember `json:"member"` + Error *AppError `json:"error"` +} + +type EmailInviteWithError struct { + Email string `json:"email"` + Error *AppError `json:"error"` +} + +type TeamMembersGetOptions struct { + // Sort the team members. Accepts "Username", but defaults to "Id". + Sort string + + // If true, exclude team members whose corresponding user is deleted. + ExcludeDeletedUsers bool + + // Restrict to search in a list of teams and channels + ViewRestrictions *ViewUsersRestrictions +} + +func (o *TeamMember) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *TeamUnread) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func TeamMemberFromJson(data io.Reader) *TeamMember { + var o *TeamMember + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamUnreadFromJson(data io.Reader) *TeamUnread { + var o *TeamUnread + json.NewDecoder(data).Decode(&o) + return o +} + +func EmailInviteWithErrorFromJson(data io.Reader) []*EmailInviteWithError { + var o []*EmailInviteWithError + json.NewDecoder(data).Decode(&o) + return o +} + +func EmailInviteWithErrorToEmails(o []*EmailInviteWithError) []string { + var ret []string + for _, o := range o { + if o.Error == nil { + ret = append(ret, o.Email) + } + } + return ret +} + +func EmailInviteWithErrorToJson(o []*EmailInviteWithError) string { + b, err := json.Marshal(o) + if err != nil { + return "[]" + } + return string(b) +} + +func EmailInviteWithErrorToString(o *EmailInviteWithError) string { + return fmt.Sprintf("%s:%s", o.Email, o.Error.Error()) +} + +func TeamMembersWithErrorToTeamMembers(o []*TeamMemberWithError) []*TeamMember { + var ret []*TeamMember + for _, o := range o { + if o.Error == nil { + ret = append(ret, o.Member) + } + } + return ret +} + +func TeamMembersWithErrorToJson(o []*TeamMemberWithError) string { + b, err := json.Marshal(o) + if err != nil { + return "[]" + } + return string(b) +} + +func TeamMemberWithErrorToString(o *TeamMemberWithError) string { + return fmt.Sprintf("%s:%s", o.UserId, o.Error.Error()) +} + +func TeamMembersWithErrorFromJson(data io.Reader) []*TeamMemberWithError { + var o []*TeamMemberWithError + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamMembersToJson(o []*TeamMember) string { + b, err := json.Marshal(o) + if err != nil { + return "[]" + } + return string(b) +} + +func TeamMembersFromJson(data io.Reader) []*TeamMember { + var o []*TeamMember + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamsUnreadToJson(o []*TeamUnread) string { + b, err := json.Marshal(o) + if err != nil { + return "[]" + } + return string(b) +} + +func TeamsUnreadFromJson(data io.Reader) []*TeamUnread { + var o []*TeamUnread + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *TeamMember) IsValid() *AppError { 
+ + if !IsValidId(o.TeamId) { + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) + } + + if !IsValidId(o.UserId) { + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *TeamMember) PreUpdate() { +} + +func (o *TeamMember) GetRoles() []string { + return strings.Fields(o.Roles) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go new file mode 100644 index 0000000..f9de580 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go @@ -0,0 +1,44 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type TeamSearch struct { + Term string `json:"term"` + Page *int `json:"page,omitempty"` + PerPage *int `json:"per_page,omitempty"` + AllowOpenInvite *bool `json:"allow_open_invite,omitempty"` + GroupConstrained *bool `json:"group_constrained,omitempty"` + IncludeGroupConstrained *bool `json:"include_group_constrained,omitempty"` +} + +func (t *TeamSearch) IsPaginated() bool { + return t.Page != nil && t.PerPage != nil +} + +// ToJson convert a TeamSearch to json string +func (t *TeamSearch) ToJson() string { + b, err := json.Marshal(t) + if err != nil { + return "" + } + + return string(b) +} + +// TeamSearchFromJson decodes the input and returns a TeamSearch +func TeamSearchFromJson(data io.Reader) *TeamSearch { + decoder := json.NewDecoder(data) + var cs TeamSearch + err := decoder.Decode(&cs) + if err == nil { + return &cs + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_stats.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team_stats.go similarity index 80% rename from vendor/github.com/mattermost/mattermost-server/model/team_stats.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/team_stats.go index 0d688b8..9209a0c 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/team_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/team_stats.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/terms_of_service.go b/vendor/github.com/mattermost/mattermost-server/v5/model/terms_of_service.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/model/terms_of_service.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/terms_of_service.go index e43f890..8ce5d35 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/terms_of_service.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/terms_of_service.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -21,7 +21,7 @@ type TermsOfService struct { } func (t *TermsOfService) IsValid() *AppError { - if len(t.Id) != 26 { + if !IsValidId(t.Id) { return InvalidTermsOfServiceError("id", "") } @@ -29,7 +29,7 @@ func (t *TermsOfService) IsValid() *AppError { return InvalidTermsOfServiceError("create_at", t.Id) } - if len(t.UserId) != 26 { + if !IsValidId(t.UserId) { return InvalidTermsOfServiceError("user_id", t.Id) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/thread.go b/vendor/github.com/mattermost/mattermost-server/v5/model/thread.go new file mode 100644 index 0000000..02c50b9 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/thread.go @@ -0,0 +1,85 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" +) + +type Thread struct { + PostId string `json:"id"` + ChannelId string `json:"channel_id"` + ReplyCount int64 `json:"reply_count"` + LastReplyAt int64 `json:"last_reply_at"` + Participants StringArray `json:"participants"` +} + +type ThreadResponse struct { + PostId string `json:"id"` + ReplyCount int64 `json:"reply_count"` + LastReplyAt int64 `json:"last_reply_at"` + LastViewedAt int64 `json:"last_viewed_at"` + Participants []*User `json:"participants"` + Post *Post `json:"post"` + UnreadReplies int64 `json:"unread_replies"` + UnreadMentions int64 `json:"unread_mentions"` +} + +type Threads struct { + Total int64 `json:"total"` + TotalUnreadThreads int64 `json:"total_unread_threads"` + TotalUnreadMentions int64 `json:"total_unread_mentions"` + Threads []*ThreadResponse `json:"threads"` +} + +type GetUserThreadsOpts struct { + // Page specifies which part of the results to return, by PageSize. Default = 0 + Page uint64 + + // PageSize specifies the size of the returned chunk of results. Default = 30 + PageSize uint64 + + // Extended will enrich the response with participant details. Default = false + Extended bool + + // Deleted will specify that even deleted threads should be returned (For mobile sync). Default = false + Deleted bool + + // Since filters the threads based on their LastUpdateAt timestamp. + Since uint64 +} + +func (o *Threads) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *Thread) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ThreadFromJson(s string) (*Thread, error) { + var t Thread + err := json.Unmarshal([]byte(s), &t) + return &t, err +} + +func (o *Thread) Etag() string { + return Etag(o.PostId, o.LastReplyAt) +} + +type ThreadMembership struct { + PostId string `json:"post_id"` + UserId string `json:"user_id"` + Following bool `json:"following"` + LastViewed int64 `json:"last_view_at"` + LastUpdated int64 `json:"last_update_at"` + UnreadMentions int64 `json:"unread_mentions"` +} + +func (o *ThreadMembership) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/token.go b/vendor/github.com/mattermost/mattermost-server/v5/model/token.go similarity index 85% rename from vendor/github.com/mattermost/mattermost-server/model/token.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/token.go index 290d577..2dcf414 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/token.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/token.go @@ -1,9 +1,11 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. 
-// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model -import "net/http" +import ( + "net/http" +) const ( TOKEN_SIZE = 64 diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go new file mode 100644 index 0000000..e2e9d3b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go @@ -0,0 +1,25 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type TypingRequest struct { + ChannelId string `json:"channel_id"` + ParentId string `json:"parent_id"` +} + +func (o *TypingRequest) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func TypingRequestFromJson(data io.Reader) *TypingRequest { + var o *TypingRequest + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go new file mode 100644 index 0000000..05994b9 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go @@ -0,0 +1,144 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" +) + +// UploadType defines the type of an upload. +type UploadType string + +const ( + UploadTypeAttachment UploadType = "attachment" + UploadTypeImport UploadType = "import" +) + +// UploadNoUserID is a "fake" user id used by the API layer when in local mode. +const UploadNoUserID = "nouser" + +// UploadSession contains information used to keep track of a file upload. +type UploadSession struct { + // The unique identifier for the session. + Id string `json:"id"` + // The type of the upload. + Type UploadType `json:"type"` + // The timestamp of creation. + CreateAt int64 `json:"create_at"` + // The id of the user performing the upload. + UserId string `json:"user_id"` + // The id of the channel to upload to. + ChannelId string `json:"channel_id,omitempty"` + // The name of the file to upload. + Filename string `json:"filename"` + // The path where the file is stored. + Path string `json:"-"` + // The size of the file to upload. + FileSize int64 `json:"file_size"` + // The amount of received data in bytes. If equal to FileSize it means the + // upload has finished. + FileOffset int64 `json:"file_offset"` +} + +// ToJson serializes the UploadSession into JSON and returns it as string. +func (us *UploadSession) ToJson() string { + b, _ := json.Marshal(us) + return string(b) +} + +// UploadSessionsToJson serializes a list of UploadSession into JSON and +// returns it as string. +func UploadSessionsToJson(uss []*UploadSession) string { + b, _ := json.Marshal(uss) + return string(b) +} + +// UploadSessionsFromJson deserializes a list of UploadSession from JSON data. +func UploadSessionsFromJson(data io.Reader) []*UploadSession { + decoder := json.NewDecoder(data) + var uss []*UploadSession + if err := decoder.Decode(&uss); err != nil { + return nil + } + return uss +} + +// UploadSessionFromJson deserializes the UploadSession from JSON data. 
+func UploadSessionFromJson(data io.Reader) *UploadSession { + decoder := json.NewDecoder(data) + var us UploadSession + if err := decoder.Decode(&us); err != nil { + return nil + } + return &us +} + +// PreSave is a utility function used to fill required information. +func (us *UploadSession) PreSave() { + if us.Id == "" { + us.Id = NewId() + } + + if us.CreateAt == 0 { + us.CreateAt = GetMillis() + } +} + +// IsValid validates an UploadType. It returns an error in case of +// failure. +func (t UploadType) IsValid() error { + switch t { + case UploadTypeAttachment: + return nil + case UploadTypeImport: + return nil + default: + } + return fmt.Errorf("invalid UploadType %s", t) +} + +// IsValid validates an UploadSession. It returns an error in case of +// failure. +func (us *UploadSession) IsValid() *AppError { + if !IsValidId(us.Id) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if err := us.Type.IsValid(); err != nil { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.type.app_error", nil, err.Error(), http.StatusBadRequest) + } + + if !IsValidId(us.UserId) && us.UserId != UploadNoUserID { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.user_id.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Type == UploadTypeAttachment && !IsValidId(us.ChannelId) { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.channel_id.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.CreateAt == 0 { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.create_at.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Filename == "" { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.filename.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.FileSize <= 0 { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_size.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.FileOffset < 0 || us.FileOffset > us.FileSize { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_offset.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + if us.Path == "" { + return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.path.app_error", nil, "id="+us.Id, http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user.go similarity index 85% rename from vendor/github.com/mattermost/mattermost-server/model/user.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user.go index ee8c8e7..4dbd002 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
package model @@ -9,20 +9,25 @@ import ( "fmt" "io" "io/ioutil" + "math/rand" "net/http" "regexp" "sort" "strings" + "sync" + "time" "unicode/utf8" - "github.com/mattermost/mattermost-server/services/timezones" "golang.org/x/crypto/bcrypt" "golang.org/x/text/language" + + "github.com/mattermost/mattermost-server/v5/services/timezones" ) const ( ME = "me" USER_NOTIFY_ALL = "all" + USER_NOTIFY_HERE = "here" USER_NOTIFY_MENTION = "mention" USER_NOTIFY_NONE = "none" DESKTOP_NOTIFY_PROP = "desktop" @@ -56,6 +61,11 @@ const ( USER_LOCALE_MAX_LENGTH = 5 ) +//msgp:tuple User + +// User contains the details about the user. +// This struct's serializer methods are auto-generated. If a new field is added/removed, +// please run make gen-serialized. type User struct { Id string `json:"id"` CreateAt int64 `json:"create_at,omitempty"` @@ -85,10 +95,17 @@ type User struct { LastActivityAt int64 `db:"-" json:"last_activity_at,omitempty"` IsBot bool `db:"-" json:"is_bot,omitempty"` BotDescription string `db:"-" json:"bot_description,omitempty"` + BotLastIconUpdate int64 `db:"-" json:"bot_last_icon_update,omitempty"` TermsOfServiceId string `db:"-" json:"terms_of_service_id,omitempty"` TermsOfServiceCreateAt int64 `db:"-" json:"terms_of_service_create_at,omitempty"` } +//msgp UserMap + +// UserMap is a map from a userId to a user object. +// It is used to generate methods which can be used for fast serialization/de-serialization. +type UserMap map[string]*User + type UserUpdate struct { Old *User New *User @@ -120,6 +137,7 @@ type UserForIndexing struct { Nickname string `json:"nickname"` FirstName string `json:"first_name"` LastName string `json:"last_name"` + Roles string `json:"roles"` CreateAt int64 `json:"create_at"` DeleteAt int64 `json:"delete_at"` TeamsIds []string `json:"team_id"` @@ -234,7 +252,7 @@ func (u *User) DeepCopy() *User { // correctly. func (u *User) IsValid() *AppError { - if len(u.Id) != 26 { + if !IsValidId(u.Id) { return InvalidUserError("id", "") } @@ -326,6 +344,11 @@ func (u *User) PreSave() { u.AuthData = nil } + u.Username = SanitizeUnicode(u.Username) + u.FirstName = SanitizeUnicode(u.FirstName) + u.LastName = SanitizeUnicode(u.LastName) + u.Nickname = SanitizeUnicode(u.Nickname) + u.Username = NormalizeUsername(u.Username) u.Email = NormalizeEmail(u.Email) @@ -359,10 +382,21 @@ func (u *User) PreSave() { // PreUpdate should be run before updating the user in the db. 
func (u *User) PreUpdate() { + u.Username = SanitizeUnicode(u.Username) + u.FirstName = SanitizeUnicode(u.FirstName) + u.LastName = SanitizeUnicode(u.LastName) + u.Nickname = SanitizeUnicode(u.Nickname) + u.BotDescription = SanitizeUnicode(u.BotDescription) + u.Username = NormalizeUsername(u.Username) u.Email = NormalizeEmail(u.Email) u.UpdateAt = GetMillis() + u.FirstName = SanitizeUnicode(u.FirstName) + u.LastName = SanitizeUnicode(u.LastName) + u.Nickname = SanitizeUnicode(u.Nickname) + u.BotDescription = SanitizeUnicode(u.BotDescription) + if u.AuthData != nil && *u.AuthData == "" { u.AuthData = nil } @@ -388,28 +422,43 @@ func (u *User) SetDefaultNotifications() { u.NotifyProps[PUSH_NOTIFY_PROP] = USER_NOTIFY_MENTION u.NotifyProps[DESKTOP_NOTIFY_PROP] = USER_NOTIFY_MENTION u.NotifyProps[DESKTOP_SOUND_NOTIFY_PROP] = "true" - u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = u.Username + ",@" + u.Username + u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = "" u.NotifyProps[CHANNEL_MENTIONS_NOTIFY_PROP] = "true" u.NotifyProps[PUSH_STATUS_NOTIFY_PROP] = STATUS_AWAY u.NotifyProps[COMMENTS_NOTIFY_PROP] = COMMENTS_NOTIFY_NEVER u.NotifyProps[FIRST_NAME_NOTIFY_PROP] = "false" } -func (user *User) UpdateMentionKeysFromUsername(oldUsername string) { +func (u *User) UpdateMentionKeysFromUsername(oldUsername string) { nonUsernameKeys := []string{} - splitKeys := strings.Split(user.NotifyProps[MENTION_KEYS_NOTIFY_PROP], ",") - for _, key := range splitKeys { + for _, key := range u.GetMentionKeys() { if key != oldUsername && key != "@"+oldUsername { nonUsernameKeys = append(nonUsernameKeys, key) } } - user.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = user.Username + ",@" + user.Username + u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] = "" if len(nonUsernameKeys) > 0 { - user.NotifyProps[MENTION_KEYS_NOTIFY_PROP] += "," + strings.Join(nonUsernameKeys, ",") + u.NotifyProps[MENTION_KEYS_NOTIFY_PROP] += "," + strings.Join(nonUsernameKeys, ",") } } +func (u *User) GetMentionKeys() []string { + var keys []string + + for _, key := range strings.Split(u.NotifyProps[MENTION_KEYS_NOTIFY_PROP], ",") { + trimmedKey := strings.TrimSpace(key) + + if trimmedKey == "" { + continue + } + + keys = append(keys, trimmedKey) + } + + return keys +} + func (u *User) Patch(patch *UserPatch) { if patch.Username != nil { u.Username = *patch.Username @@ -470,7 +519,7 @@ func (u *UserAuth) ToJson() string { // Generate a valid strong etag so the browser can cache the results func (u *User) Etag(showFullName, showEmail bool) string { - return Etag(u.Id, u.UpdateAt, u.TermsOfServiceId, u.TermsOfServiceCreateAt, showFullName, showEmail) + return Etag(u.Id, u.UpdateAt, u.TermsOfServiceId, u.TermsOfServiceCreateAt, showFullName, showEmail, u.BotLastIconUpdate) } // Remove any private data from the user object @@ -495,13 +544,15 @@ func (u *User) Sanitize(options map[string]bool) { } // Remove any input data from the user object that is not user controlled -func (u *User) SanitizeInput() { - u.AuthData = NewString("") - u.AuthService = "" +func (u *User) SanitizeInput(isAdmin bool) { + if !isAdmin { + u.AuthData = NewString("") + u.AuthService = "" + u.EmailVerified = false + } u.LastPasswordUpdate = 0 u.LastPictureUpdate = 0 u.FailedAttempts = 0 - u.EmailVerified = false u.MfaActive = false u.MfaSecret = "" } @@ -613,6 +664,10 @@ func (u *User) IsGuest() bool { return IsInRole(u.Roles, SYSTEM_GUEST_ROLE_ID) } +func (u *User) IsSystemAdmin() bool { + return IsInRole(u.Roles, SYSTEM_ADMIN_ROLE_ID) +} + // Make sure you acually want to use this function. 
In context.go there are functions to check permissions // This function should not be used to check permissions. func (u *User) IsInRole(inRole string) bool { @@ -836,3 +891,40 @@ func UsersWithGroupsAndCountFromJson(data io.Reader) *UsersWithGroupsAndCount { json.Unmarshal(bodyBytes, uwg) return uwg } + +type lockedRand struct { + mu sync.Mutex + rn *rand.Rand +} + +func (r *lockedRand) Intn(n int) int { + r.mu.Lock() + m := r.rn.Intn(n) + r.mu.Unlock() + return m +} + +var passwordRandom = lockedRand{ + rn: rand.New(rand.NewSource(time.Now().Unix())), +} + +var passwordSpecialChars = "!$%^&*(),." +var passwordNumbers = "0123456789" +var passwordUpperCaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +var passwordLowerCaseLetters = "abcdefghijklmnopqrstuvwxyz" +var passwordAllChars = passwordSpecialChars + passwordNumbers + passwordUpperCaseLetters + passwordLowerCaseLetters + +func GeneratePassword(minimumLength int) string { + // Make sure we are guaranteed at least one of each type to meet any possible password complexity requirements. + password := string([]rune(passwordUpperCaseLetters)[passwordRandom.Intn(len(passwordUpperCaseLetters))]) + + string([]rune(passwordNumbers)[passwordRandom.Intn(len(passwordNumbers))]) + + string([]rune(passwordLowerCaseLetters)[passwordRandom.Intn(len(passwordLowerCaseLetters))]) + + string([]rune(passwordSpecialChars)[passwordRandom.Intn(len(passwordSpecialChars))]) + + for len(password) < minimumLength { + i := passwordRandom.Intn(len(passwordAllChars)) + password = password + string([]rune(passwordAllChars)[i]) + } + + return password +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token.go similarity index 90% rename from vendor/github.com/mattermost/mattermost-server/model/user_access_token.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token.go index bffd9fc..f458a6d 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package model @@ -18,7 +18,7 @@ type UserAccessToken struct { } func (t *UserAccessToken) IsValid() *AppError { - if len(t.Id) != 26 { + if !IsValidId(t.Id) { return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.id.app_error", nil, "", http.StatusBadRequest) } @@ -26,7 +26,7 @@ func (t *UserAccessToken) IsValid() *AppError { return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.token.app_error", nil, "", http.StatusBadRequest) } - if len(t.UserId) != 26 { + if !IsValidId(t.UserId) { return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token_search.go similarity index 84% rename from vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token_search.go index 1b0146e..a692f69 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_access_token_search.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_autocomplete.go similarity index 88% rename from vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user_autocomplete.go index b5edb45..118e138 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_autocomplete.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -31,11 +31,10 @@ func UserAutocompleteFromJson(data io.Reader) *UserAutocomplete { decoder := json.NewDecoder(data) autocomplete := new(UserAutocomplete) err := decoder.Decode(&autocomplete) - if err == nil { - return autocomplete - } else { + if err != nil { return nil } + return autocomplete } func (o *UserAutocompleteInChannel) ToJson() string { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go new file mode 100644 index 0000000..ee47488 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go @@ -0,0 +1,26 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +// Options for counting users +type UserCountOptions struct { + // Should include users that are bots + IncludeBotAccounts bool + // Should include deleted users (of any type) + IncludeDeleted bool + // Exclude regular users + ExcludeRegularUsers bool + // Only include users on a specific team. "" for any team. + TeamId string + // Only include users on a specific channel. "" for any channel. 
+ ChannelId string + // Restrict to search in a list of teams and channels + ViewRestrictions *ViewUsersRestrictions + // Only include users matching any of the given system wide roles. + Roles []string + // Only include users matching any of the given channel roles, must be used with ChannelId. + ChannelRoles []string + // Only include users matching any of the given team roles, must be used with TeamId. + TeamRoles []string +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_get.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go similarity index 60% rename from vendor/github.com/mattermost/mattermost-server/model/user_get.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go index d4863fe..2748d73 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user_get.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -12,14 +12,24 @@ type UserGetOptions struct { InChannelId string // Filters the users not in the channel NotInChannelId string + // Filters the users in the group + InGroupId string // Filters the users group constrained GroupConstrained bool // Filters the users without a team WithoutTeam bool // Filters the inactive users Inactive bool + // Filters the active users + Active bool // Filters for the given role Role string + // Filters for users matching any of the given system wide roles + Roles []string + // Filters for users matching any of the given channel roles, must be used with InChannelId + ChannelRoles []string + // Filters for users matching any of the given team roles, must be used with InTeamId + TeamRoles []string // Sorting option Sort string // Restrict to search in a list of teams and channels diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go similarity index 56% rename from vendor/github.com/mattermost/mattermost-server/model/user_search.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go index 2a814d0..0a721ea 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user_search.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -13,16 +13,20 @@ const USER_SEARCH_DEFAULT_LIMIT = 100 // UserSearch captures the parameters provided by a client for initiating a user search. 
type UserSearch struct { - Term string `json:"term"` - TeamId string `json:"team_id"` - NotInTeamId string `json:"not_in_team_id"` - InChannelId string `json:"in_channel_id"` - NotInChannelId string `json:"not_in_channel_id"` - GroupConstrained bool `json:"group_constrained"` - AllowInactive bool `json:"allow_inactive"` - WithoutTeam bool `json:"without_team"` - Limit int `json:"limit"` - Role string `json:"role"` + Term string `json:"term"` + TeamId string `json:"team_id"` + NotInTeamId string `json:"not_in_team_id"` + InChannelId string `json:"in_channel_id"` + NotInChannelId string `json:"not_in_channel_id"` + InGroupId string `json:"in_group_id"` + GroupConstrained bool `json:"group_constrained"` + AllowInactive bool `json:"allow_inactive"` + WithoutTeam bool `json:"without_team"` + Limit int `json:"limit"` + Role string `json:"role"` + Roles []string `json:"roles"` + ChannelRoles []string `json:"channel_roles"` + TeamRoles []string `json:"team_roles"` } // ToJson convert a User to a json string @@ -60,6 +64,14 @@ type UserSearchOptions struct { Limit int // Filters for the given role Role string + // Filters for users that have any of the given system roles + Roles []string + // Filters for users that have the given channel roles to be used when searching in a channel + ChannelRoles []string + // Filters for users that have the given team roles to be used when searching in a team + TeamRoles []string // Restrict to search in a list of teams and channels ViewRestrictions *ViewUsersRestrictions + // List of allowed channels + ListOfAllowedChannels []string } diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_terms_of_service.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_terms_of_service.go similarity index 88% rename from vendor/github.com/mattermost/mattermost-server/model/user_terms_of_service.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/user_terms_of_service.go index b714f92..9a0f4f1 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/user_terms_of_service.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_terms_of_service.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -17,11 +17,11 @@ type UserTermsOfService struct { } func (ut *UserTermsOfService) IsValid() *AppError { - if len(ut.UserId) != 26 { + if !IsValidId(ut.UserId) { return InvalidUserTermsOfServiceError("user_id", ut.UserId) } - if len(ut.TermsOfServiceId) != 26 { + if !IsValidId(ut.TermsOfServiceId) { return InvalidUserTermsOfServiceError("terms_of_service_id", ut.UserId) } diff --git a/vendor/github.com/mattermost/mattermost-server/model/users_stats.go b/vendor/github.com/mattermost/mattermost-server/v5/model/users_stats.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/model/users_stats.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/users_stats.go index 49c882e..3e18296 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/users_stats.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/users_stats.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
+// See LICENSE.txt for license information. package model diff --git a/vendor/github.com/mattermost/mattermost-server/model/utils.go b/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go similarity index 79% rename from vendor/github.com/mattermost/mattermost-server/model/utils.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/utils.go index 039e756..f8c8a47 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/utils.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -18,6 +18,7 @@ import ( "regexp" "strconv" "strings" + "sync" "time" "unicode" @@ -36,6 +37,26 @@ type StringInterface map[string]interface{} type StringMap map[string]string type StringArray []string +func (sa StringArray) Remove(input string) StringArray { + for index := range sa { + if sa[index] == input { + ret := make(StringArray, 0, len(sa)-1) + ret = append(ret, sa[:index]...) + return append(ret, sa[index+1:]...) + } + } + return sa +} + +func (sa StringArray) Contains(input string) bool { + for index := range sa { + if sa[index] == input { + return true + } + } + + return false +} func (sa StringArray) Equals(input StringArray) bool { if len(sa) != len(input) { @@ -52,10 +73,13 @@ func (sa StringArray) Equals(input StringArray) bool { return true } -var translateFunc goi18n.TranslateFunc = nil +var translateFunc goi18n.TranslateFunc +var translateFuncOnce sync.Once func AppErrorInit(t goi18n.TranslateFunc) { - translateFunc = t + translateFuncOnce.Do(func() { + translateFunc = t + }) } type AppError struct { @@ -89,9 +113,8 @@ func (er *AppError) Translate(T goi18n.TranslateFunc) { func (er *AppError) SystemMessage(T goi18n.TranslateFunc) string { if er.params == nil { return T(er.Id) - } else { - return T(er.Id, er.params) } + return T(er.Id, er.params) } func (er *AppError) ToJson() string { @@ -112,11 +135,10 @@ func AppErrorFromJson(data io.Reader) *AppError { decoder := json.NewDecoder(strings.NewReader(str)) var er AppError err := decoder.Decode(&er) - if err == nil { - return &er - } else { + if err != nil { return NewAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+str, http.StatusInternalServerError) } + return &er } func NewAppError(where string, id string, params map[string]interface{}, details string, status int) *AppError { @@ -146,15 +168,29 @@ func NewId() string { return b.String() } +// NewRandomTeamName is a NewId that will be a valid team name. +func NewRandomTeamName() string { + teamName := NewId() + for IsReservedTeamName(teamName) { + teamName = NewId() + } + return teamName +} + +// NewRandomString returns a random string of the given length. +// The resulting entropy will be (5 * length) bits. func NewRandomString(length int) string { - var b bytes.Buffer - str := make([]byte, length+8) - rand.Read(str) - encoder := base32.NewEncoder(encoding, &b) - encoder.Write(str) - encoder.Close() - b.Truncate(length) // removes the '==' padding - return b.String() + data := make([]byte, 1+(length*5/8)) + rand.Read(data) + return encoding.EncodeToString(data)[:length] +} + +// NewRandomBase32String returns a base32 encoded string of a random slice +// of bytes of the given size. The resulting entropy will be (8 * size) bits. 
+func NewRandomBase32String(size int) string { + data := make([]byte, size) + rand.Read(data) + return base32.StdEncoding.EncodeToString(data) } // GetMillis is a convenience method to get milliseconds since epoch. @@ -207,7 +243,7 @@ func MapToJson(objmap map[string]string) string { return string(b) } -// MapToJson converts a map to a json string +// MapBoolToJson converts a map to a json string func MapBoolToJson(objmap map[string]bool) string { b, _ := json.Marshal(objmap) return string(b) @@ -220,9 +256,8 @@ func MapFromJson(data io.Reader) map[string]string { var objmap map[string]string if err := decoder.Decode(&objmap); err != nil { return make(map[string]string) - } else { - return objmap } + return objmap } // MapFromJson will decode the key/value pair map @@ -232,9 +267,8 @@ func MapBoolFromJson(data io.Reader) map[string]bool { var objmap map[string]bool if err := decoder.Decode(&objmap); err != nil { return make(map[string]bool) - } else { - return objmap } + return objmap } func ArrayToJson(objmap []string) string { @@ -248,9 +282,8 @@ func ArrayFromJson(data io.Reader) []string { var objmap []string if err := decoder.Decode(&objmap); err != nil { return make([]string, 0) - } else { - return objmap } + return objmap } func ArrayFromInterface(data interface{}) []string { @@ -281,9 +314,8 @@ func StringInterfaceFromJson(data io.Reader) map[string]interface{} { var objmap map[string]interface{} if err := decoder.Decode(&objmap); err != nil { return make(map[string]interface{}) - } else { - return objmap } + return objmap } func StringToJson(s string) string { @@ -297,9 +329,8 @@ func StringFromJson(data io.Reader) string { var s string if err := decoder.Decode(&s); err != nil { return "" - } else { - return s } + return s } func GetServerIpAddress(iface string) string { @@ -358,15 +389,20 @@ func IsValidEmail(email string) bool { } var reservedName = []string{ - "signup", - "login", "admin", - "channel", - "post", "api", - "oauth", + "channel", + "claim", "error", "help", + "landing", + "login", + "mfa", + "oauth", + "plug", + "plugins", + "post", + "signup", } func IsValidChannelIdentifier(s string) bool { @@ -461,9 +497,8 @@ func GetImageMimeType(ext string) string { ext = strings.ToLower(ext) if len(IMAGE_MIME_TYPES[ext]) == 0 { return "image" - } else { - return IMAGE_MIME_TYPES[ext] } + return IMAGE_MIME_TYPES[ext] } func ClearMentionTags(post string) string { @@ -477,7 +512,7 @@ func IsValidHttpUrl(rawUrl string) bool { return false } - if _, err := url.ParseRequestURI(rawUrl); err != nil { + if u, err := url.ParseRequestURI(rawUrl); err != nil || u.Scheme == "" || u.Host == "" { return false } @@ -624,3 +659,61 @@ func GetPreferredTimezone(timezone StringMap) string { return timezone["manualTimezone"] } + +// IsSamlFile checks if filename is a SAML file. +func IsSamlFile(saml *SamlSettings, filename string) bool { + return filename == *saml.PublicCertificateFile || filename == *saml.PrivateKeyFile || filename == *saml.IdpCertificateFile +} + +func AsStringBoolMap(list []string) map[string]bool { + listMap := map[string]bool{} + for _, p := range list { + listMap[p] = true + } + return listMap +} + +// SanitizeUnicode will remove undesirable Unicode characters from a string. +func SanitizeUnicode(s string) string { + return strings.Map(filterBlocklist, s) +} + +// filterBlocklist returns `r` if it is not in the blocklist, otherwise drop (-1). 
+// Blocklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist +func filterBlocklist(r rune) rune { + const drop = -1 + switch r { + case '\u0340', '\u0341': // clones of grave and acute; deprecated in Unicode + return drop + case '\u17A3', '\u17D3': // obsolete characters for Khmer; deprecated in Unicode + return drop + case '\u2028', '\u2029': // line and paragraph separator + return drop + case '\u202A', '\u202B', '\u202C', '\u202D', '\u202E': // BIDI embedding controls + return drop + case '\u206A', '\u206B': // activate/inhibit symmetric swapping; deprecated in Unicode + return drop + case '\u206C', '\u206D': // activate/inhibit Arabic form shaping; deprecated in Unicode + return drop + case '\u206E', '\u206F': // activate/inhibit national digit shapes; deprecated in Unicode + return drop + case '\uFFF9', '\uFFFA', '\uFFFB': // interlinear annotation characters + return drop + case '\uFEFF': // byte order mark + return drop + case '\uFFFC': // object replacement character + return drop + } + + // Scoping for musical notation + if r >= 0x0001D173 && r <= 0x0001D17A { + return drop + } + + // Language tag code points + if r >= 0x000E0000 && r <= 0x000E007F { + return drop + } + + return r +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/version.go b/vendor/github.com/mattermost/mattermost-server/v5/model/version.go similarity index 91% rename from vendor/github.com/mattermost/mattermost-server/model/version.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/version.go index 6fc625b..5ecd2e7 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/version.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/version.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package model @@ -13,6 +13,26 @@ import ( // It should be maintained in chronological order with most current // release at the front of the list. var versions = []string{ + "5.32.1", + "5.32.0", + "5.31.0", + "5.30.2", + "5.30.1", + "5.30.0", + "5.29.0", + "5.28.0", + "5.27.0", + "5.26.0", + "5.25.0", + "5.24.0", + "5.23.0", + "5.22.0", + "5.21.0", + "5.20.0", + "5.19.0", + "5.18.0", + "5.17.0", + "5.16.0", "5.15.0", "5.14.0", "5.13.0", @@ -133,9 +153,8 @@ func IsCurrentVersion(versionToCheck string) bool { if toCheckMajor == currentMajor && toCheckMinor == currentMinor { return true - } else { - return false } + return false } func IsPreviousVersionsSupported(versionToCheck string) bool { diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go new file mode 100644 index 0000000..cd89e2d --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_client.go @@ -0,0 +1,321 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "net/http" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" +) + +const ( + SOCKET_MAX_MESSAGE_SIZE_KB = 8 * 1024 // 8KB + PING_TIMEOUT_BUFFER_SECONDS = 5 +) + +type msgType int + +const ( + msgTypeJSON msgType = iota + 1 + msgTypePong +) + +type writeMessage struct { + msgType msgType + data interface{} +} + +const avgReadMsgSizeBytes = 1024 + +// WebSocketClient stores the necessary information required to +// communicate with a WebSocket endpoint. 
+// A client must read from PingTimeoutChannel, EventChannel and ResponseChannel to prevent +// deadlocks from occurring in the program. +type WebSocketClient struct { + Url string // The location of the server like "ws://localhost:8065" + ApiUrl string // The API location of the server like "ws://localhost:8065/api/v3" + ConnectUrl string // The WebSocket URL to connect to like "ws://localhost:8065/api/v3/path/to/websocket" + Conn *websocket.Conn // The WebSocket connection + AuthToken string // The token used to open the WebSocket connection + Sequence int64 // The ever-incrementing sequence attached to each WebSocket action + PingTimeoutChannel chan bool // The channel used to signal ping timeouts + EventChannel chan *WebSocketEvent // The channel used to receive various events pushed from the server. For example: typing, posted + ResponseChannel chan *WebSocketResponse // The channel used to receive responses for requests made to the server + ListenError *AppError // A field that is set if there was an abnormal closure of the WebSocket connection + writeChan chan writeMessage + + pingTimeoutTimer *time.Timer + quitPingWatchdog chan struct{} + + quitWriterChan chan struct{} + resetTimerChan chan struct{} + closed int32 +} + +// NewWebSocketClient constructs a new WebSocket client with convenience +// methods for talking to the server. +func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) { + return NewWebSocketClientWithDialer(websocket.DefaultDialer, url, authToken) +} + +// NewWebSocketClientWithDialer constructs a new WebSocket client with convenience +// methods for talking to the server using a custom dialer. +func NewWebSocketClientWithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, *AppError) { + conn, _, err := dialer.Dial(url+API_URL_SUFFIX+"/websocket", nil) + if err != nil { + return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + client := &WebSocketClient{ + Url: url, + ApiUrl: url + API_URL_SUFFIX, + ConnectUrl: url + API_URL_SUFFIX + "/websocket", + Conn: conn, + AuthToken: authToken, + Sequence: 1, + PingTimeoutChannel: make(chan bool, 1), + EventChannel: make(chan *WebSocketEvent, 100), + ResponseChannel: make(chan *WebSocketResponse, 100), + writeChan: make(chan writeMessage), + quitPingWatchdog: make(chan struct{}), + quitWriterChan: make(chan struct{}), + resetTimerChan: make(chan struct{}), + } + + client.configurePingHandling() + go client.writer() + + client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken}) + + return client, nil +} + +// NewWebSocketClient4 constructs a new WebSocket client with convenience +// methods for talking to the server. Uses the v4 endpoint. +func NewWebSocketClient4(url, authToken string) (*WebSocketClient, *AppError) { + return NewWebSocketClient4WithDialer(websocket.DefaultDialer, url, authToken) +} + +// NewWebSocketClient4WithDialer constructs a new WebSocket client with convenience +// methods for talking to the server using a custom dialer. Uses the v4 endpoint. +func NewWebSocketClient4WithDialer(dialer *websocket.Dialer, url, authToken string) (*WebSocketClient, *AppError) { + return NewWebSocketClientWithDialer(dialer, url, authToken) +} + +// Connect creates a websocket connection with the given ConnectUrl. +// This is racy and error-prone should not be used. Use any of the New* functions to create a websocket. 
+func (wsc *WebSocketClient) Connect() *AppError { + return wsc.ConnectWithDialer(websocket.DefaultDialer) +} + +// ConnectWithDialer creates a websocket connection with the given ConnectUrl using the dialer. +// This is racy and error-prone and should not be used. Use any of the New* functions to create a websocket. +func (wsc *WebSocketClient) ConnectWithDialer(dialer *websocket.Dialer) *AppError { + var err error + wsc.Conn, _, err = dialer.Dial(wsc.ConnectUrl, nil) + if err != nil { + return NewAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + // Super racy and should not be done anyways. + // All of this needs to be redesigned for v6. + wsc.configurePingHandling() + // If it has been closed before, we just restart the writer. + if atomic.CompareAndSwapInt32(&wsc.closed, 1, 0) { + wsc.writeChan = make(chan writeMessage) + wsc.quitWriterChan = make(chan struct{}) + go wsc.writer() + wsc.resetTimerChan = make(chan struct{}) + wsc.quitPingWatchdog = make(chan struct{}) + } + + wsc.EventChannel = make(chan *WebSocketEvent, 100) + wsc.ResponseChannel = make(chan *WebSocketResponse, 100) + + wsc.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": wsc.AuthToken}) + + return nil +} + +// Close closes the websocket client. It is recommended that a closed client should not be +// reused again. Rather a new client should be created anew. +func (wsc *WebSocketClient) Close() { + // CAS to 1 and proceed. Return if already 1. + if !atomic.CompareAndSwapInt32(&wsc.closed, 0, 1) { + return + } + wsc.quitWriterChan <- struct{}{} + close(wsc.writeChan) + // We close the connection, which breaks the reader loop. + // Then we let the defer block in the reader do further cleanup. + wsc.Conn.Close() +} + +// TODO: un-export the Conn so that Write methods go through the writer +func (wsc *WebSocketClient) writer() { + for { + select { + case msg := <-wsc.writeChan: + switch msg.msgType { + case msgTypeJSON: + wsc.Conn.WriteJSON(msg.data) + case msgTypePong: + wsc.Conn.WriteMessage(websocket.PongMessage, []byte{}) + } + case <-wsc.quitWriterChan: + return + } + } +} + +// Listen starts the read loop of the websocket client. +func (wsc *WebSocketClient) Listen() { + // This loop can exit in 2 conditions: + // 1. Either the connection breaks naturally. + // 2. Close was explicitly called, which closes the connection manually. + // + // Due to the way the API is written, there is a requirement that a client may NOT + // call Listen at all and can still call Close and Connect. + // Therefore, we let the cleanup of the reader stuff rely on closing the connection + // and then we do the cleanup in the defer block. + // + // First, we close some channels and then CAS to 1 and proceed to close the writer chan also. + // This is needed because then the defer clause does not double-close the writer when (2) happens. + // But if (1) happens, we set the closed bit, and close the rest of the stuff. + go func() { + defer func() { + close(wsc.EventChannel) + close(wsc.ResponseChannel) + close(wsc.quitPingWatchdog) + close(wsc.resetTimerChan) + // We CAS to 1 and proceed. + if !atomic.CompareAndSwapInt32(&wsc.closed, 0, 1) { + return + } + wsc.quitWriterChan <- struct{}{} + close(wsc.writeChan) + wsc.Conn.Close() // This can most likely be removed. Needs to be checked. + }() + + var buf bytes.Buffer + buf.Grow(avgReadMsgSizeBytes) + + for { + // Reset buffer. 
+ buf.Reset() + _, r, err := wsc.Conn.NextReader() + if err != nil { + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { + wsc.ListenError = NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + return + } + // Use pre-allocated buffer. + _, err = buf.ReadFrom(r) + if err != nil { + // This should use a different error ID, but en.json is not imported anyways. + // It's a different bug altogether but we let it be for now. + // See MM-24520. + wsc.ListenError = NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + return + } + + event := WebSocketEventFromJson(bytes.NewReader(buf.Bytes())) + if event == nil { + continue + } + if event.IsValid() { + wsc.EventChannel <- event + continue + } + + var response WebSocketResponse + if err := json.Unmarshal(buf.Bytes(), &response); err == nil && response.IsValid() { + wsc.ResponseChannel <- &response + continue + } + } + }() +} + +func (wsc *WebSocketClient) SendMessage(action string, data map[string]interface{}) { + req := &WebSocketRequest{} + req.Seq = wsc.Sequence + req.Action = action + req.Data = data + + wsc.Sequence++ + wsc.writeChan <- writeMessage{ + msgType: msgTypeJSON, + data: req, + } +} + +// UserTyping will push a user_typing event out to all connected users +// who are in the specified channel +func (wsc *WebSocketClient) UserTyping(channelId, parentId string) { + data := map[string]interface{}{ + "channel_id": channelId, + "parent_id": parentId, + } + + wsc.SendMessage("user_typing", data) +} + +// GetStatuses will return a map of string statuses using user id as the key +func (wsc *WebSocketClient) GetStatuses() { + wsc.SendMessage("get_statuses", nil) +} + +// GetStatusesByIds will fetch certain user statuses based on ids and return +// a map of string statuses using user id as the key +func (wsc *WebSocketClient) GetStatusesByIds(userIds []string) { + data := map[string]interface{}{ + "user_ids": userIds, + } + wsc.SendMessage("get_statuses_by_ids", data) +} + +func (wsc *WebSocketClient) configurePingHandling() { + wsc.Conn.SetPingHandler(wsc.pingHandler) + wsc.pingTimeoutTimer = time.NewTimer(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) + go wsc.pingWatchdog() +} + +func (wsc *WebSocketClient) pingHandler(appData string) error { + if atomic.LoadInt32(&wsc.closed) == 1 { + return nil + } + wsc.resetTimerChan <- struct{}{} + wsc.writeChan <- writeMessage{ + msgType: msgTypePong, + } + return nil +} + +// pingWatchdog is used to send values to the PingTimeoutChannel whenever a timeout occurs. +// We use the resetTimerChan from the pingHandler to pass the signal, and then reset the timer +// after draining it. And if the timer naturally expires, we also extend it to prevent it from +// being deadlocked when the resetTimerChan case runs. Because timer.Stop would return false, +// and the code would be forever stuck trying to read from C. 
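The comment above describes the standard Stop/drain/Reset dance for a time.Timer. Below is a small, self-contained sketch of that idiom, independent of this client; the blocking drain of t.C is only safe when no other goroutine consumes the channel, which is exactly why pingWatchdog also re-arms the timer whenever it fires naturally:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(50 * time.Millisecond)

	// reset mirrors the pattern described above: Stop the timer and, if it had
	// already fired, drain its channel before Reset so no stale value is left in C.
	reset := func(d time.Duration) {
		if !t.Stop() {
			<-t.C // safe here because nothing else reads t.C
		}
		t.Reset(d)
	}

	time.Sleep(100 * time.Millisecond) // let the timer expire once
	reset(50 * time.Millisecond)       // Stop returns false, so the fired value is drained first
	<-t.C
	fmt.Println("timer fired after reset")
}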
+func (wsc *WebSocketClient) pingWatchdog() { + for { + select { + case <-wsc.resetTimerChan: + if !wsc.pingTimeoutTimer.Stop() { + <-wsc.pingTimeoutTimer.C + } + wsc.pingTimeoutTimer.Reset(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) + + case <-wsc.pingTimeoutTimer.C: + wsc.PingTimeoutChannel <- true + wsc.pingTimeoutTimer.Reset(time.Second * (60 + PING_TIMEOUT_BUFFER_SECONDS)) + case <-wsc.quitPingWatchdog: + return + } + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go new file mode 100644 index 0000000..3d66772 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go @@ -0,0 +1,286 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +const ( + WEBSOCKET_EVENT_TYPING = "typing" + WEBSOCKET_EVENT_POSTED = "posted" + WEBSOCKET_EVENT_POST_EDITED = "post_edited" + WEBSOCKET_EVENT_POST_DELETED = "post_deleted" + WEBSOCKET_EVENT_POST_UNREAD = "post_unread" + WEBSOCKET_EVENT_CHANNEL_CONVERTED = "channel_converted" + WEBSOCKET_EVENT_CHANNEL_CREATED = "channel_created" + WEBSOCKET_EVENT_CHANNEL_DELETED = "channel_deleted" + WEBSOCKET_EVENT_CHANNEL_RESTORED = "channel_restored" + WEBSOCKET_EVENT_CHANNEL_UPDATED = "channel_updated" + WEBSOCKET_EVENT_CHANNEL_MEMBER_UPDATED = "channel_member_updated" + WEBSOCKET_EVENT_CHANNEL_SCHEME_UPDATED = "channel_scheme_updated" + WEBSOCKET_EVENT_DIRECT_ADDED = "direct_added" + WEBSOCKET_EVENT_GROUP_ADDED = "group_added" + WEBSOCKET_EVENT_NEW_USER = "new_user" + WEBSOCKET_EVENT_ADDED_TO_TEAM = "added_to_team" + WEBSOCKET_EVENT_LEAVE_TEAM = "leave_team" + WEBSOCKET_EVENT_UPDATE_TEAM = "update_team" + WEBSOCKET_EVENT_DELETE_TEAM = "delete_team" + WEBSOCKET_EVENT_RESTORE_TEAM = "restore_team" + WEBSOCKET_EVENT_UPDATE_TEAM_SCHEME = "update_team_scheme" + WEBSOCKET_EVENT_USER_ADDED = "user_added" + WEBSOCKET_EVENT_USER_UPDATED = "user_updated" + WEBSOCKET_EVENT_USER_ROLE_UPDATED = "user_role_updated" + WEBSOCKET_EVENT_MEMBERROLE_UPDATED = "memberrole_updated" + WEBSOCKET_EVENT_USER_REMOVED = "user_removed" + WEBSOCKET_EVENT_PREFERENCE_CHANGED = "preference_changed" + WEBSOCKET_EVENT_PREFERENCES_CHANGED = "preferences_changed" + WEBSOCKET_EVENT_PREFERENCES_DELETED = "preferences_deleted" + WEBSOCKET_EVENT_EPHEMERAL_MESSAGE = "ephemeral_message" + WEBSOCKET_EVENT_STATUS_CHANGE = "status_change" + WEBSOCKET_EVENT_HELLO = "hello" + WEBSOCKET_AUTHENTICATION_CHALLENGE = "authentication_challenge" + WEBSOCKET_EVENT_REACTION_ADDED = "reaction_added" + WEBSOCKET_EVENT_REACTION_REMOVED = "reaction_removed" + WEBSOCKET_EVENT_RESPONSE = "response" + WEBSOCKET_EVENT_EMOJI_ADDED = "emoji_added" + WEBSOCKET_EVENT_CHANNEL_VIEWED = "channel_viewed" + WEBSOCKET_EVENT_PLUGIN_STATUSES_CHANGED = "plugin_statuses_changed" + WEBSOCKET_EVENT_PLUGIN_ENABLED = "plugin_enabled" + WEBSOCKET_EVENT_PLUGIN_DISABLED = "plugin_disabled" + WEBSOCKET_EVENT_ROLE_UPDATED = "role_updated" + WEBSOCKET_EVENT_LICENSE_CHANGED = "license_changed" + WEBSOCKET_EVENT_CONFIG_CHANGED = "config_changed" + WEBSOCKET_EVENT_OPEN_DIALOG = "open_dialog" + WEBSOCKET_EVENT_GUESTS_DEACTIVATED = "guests_deactivated" + WEBSOCKET_EVENT_USER_ACTIVATION_STATUS_CHANGE = "user_activation_status_change" + WEBSOCKET_EVENT_RECEIVED_GROUP = "received_group" + WEBSOCKET_EVENT_RECEIVED_GROUP_ASSOCIATED_TO_TEAM = 
"received_group_associated_to_team" + WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_TEAM = "received_group_not_associated_to_team" + WEBSOCKET_EVENT_RECEIVED_GROUP_ASSOCIATED_TO_CHANNEL = "received_group_associated_to_channel" + WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_CHANNEL = "received_group_not_associated_to_channel" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_CREATED = "sidebar_category_created" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_UPDATED = "sidebar_category_updated" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_DELETED = "sidebar_category_deleted" + WEBSOCKET_EVENT_SIDEBAR_CATEGORY_ORDER_UPDATED = "sidebar_category_order_updated" + WEBSOCKET_WARN_METRIC_STATUS_RECEIVED = "warn_metric_status_received" + WEBSOCKET_WARN_METRIC_STATUS_REMOVED = "warn_metric_status_removed" + WEBSOCKET_EVENT_CLOUD_PAYMENT_STATUS_UPDATED = "cloud_payment_status_updated" + WEBSOCKET_EVENT_THREAD_UPDATED = "thread_updated" + WEBSOCKET_EVENT_THREAD_FOLLOW_CHANGED = "thread_follow_changed" + WEBSOCKET_EVENT_THREAD_READ_CHANGED = "thread_read_changed" +) + +type WebSocketMessage interface { + ToJson() string + IsValid() bool + EventType() string +} + +type WebsocketBroadcast struct { + OmitUsers map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here + UserId string `json:"user_id"` // broadcast only occurs for this user + ChannelId string `json:"channel_id"` // broadcast only occurs for users in this channel + TeamId string `json:"team_id"` // broadcast only occurs for users in this team + ContainsSanitizedData bool `json:"-"` + ContainsSensitiveData bool `json:"-"` +} + +type precomputedWebSocketEventJSON struct { + Event json.RawMessage + Data json.RawMessage + Broadcast json.RawMessage +} + +// webSocketEventJSON mirrors WebSocketEvent to make some of its unexported fields serializable +type webSocketEventJSON struct { + Event string `json:"event"` + Data map[string]interface{} `json:"data"` + Broadcast *WebsocketBroadcast `json:"broadcast"` + Sequence int64 `json:"seq"` +} + +// **NOTE**: Direct access to WebSocketEvent fields is deprecated. They will be +// made unexported in next major version release. Provided getter functions should be used instead. +type WebSocketEvent struct { + Event string // Deprecated: use EventType() + Data map[string]interface{} // Deprecated: use GetData() + Broadcast *WebsocketBroadcast // Deprecated: use GetBroadcast() + Sequence int64 // Deprecated: use GetSequence() + precomputedJSON *precomputedWebSocketEventJSON +} + +// PrecomputeJSON precomputes and stores the serialized JSON for all fields other than Sequence. +// This makes ToJson much more efficient when sending the same event to multiple connections. 
+func (ev *WebSocketEvent) PrecomputeJSON() *WebSocketEvent { + copy := ev.Copy() + event, _ := json.Marshal(copy.Event) + data, _ := json.Marshal(copy.Data) + broadcast, _ := json.Marshal(copy.Broadcast) + copy.precomputedJSON = &precomputedWebSocketEventJSON{ + Event: json.RawMessage(event), + Data: json.RawMessage(data), + Broadcast: json.RawMessage(broadcast), + } + return copy +} + +func (ev *WebSocketEvent) Add(key string, value interface{}) { + ev.Data[key] = value +} + +func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers map[string]bool) *WebSocketEvent { + return &WebSocketEvent{Event: event, Data: make(map[string]interface{}), + Broadcast: &WebsocketBroadcast{TeamId: teamId, ChannelId: channelId, UserId: userId, OmitUsers: omitUsers}} +} + +func (ev *WebSocketEvent) Copy() *WebSocketEvent { + copy := &WebSocketEvent{ + Event: ev.Event, + Data: ev.Data, + Broadcast: ev.Broadcast, + Sequence: ev.Sequence, + precomputedJSON: ev.precomputedJSON, + } + return copy +} + +func (ev *WebSocketEvent) GetData() map[string]interface{} { + return ev.Data +} + +func (ev *WebSocketEvent) GetBroadcast() *WebsocketBroadcast { + return ev.Broadcast +} + +func (ev *WebSocketEvent) GetSequence() int64 { + return ev.Sequence +} + +func (ev *WebSocketEvent) SetEvent(event string) *WebSocketEvent { + copy := ev.Copy() + copy.Event = event + return copy +} + +func (ev *WebSocketEvent) SetData(data map[string]interface{}) *WebSocketEvent { + copy := ev.Copy() + copy.Data = data + return copy +} + +func (ev *WebSocketEvent) SetBroadcast(broadcast *WebsocketBroadcast) *WebSocketEvent { + copy := ev.Copy() + copy.Broadcast = broadcast + return copy +} + +func (ev *WebSocketEvent) SetSequence(seq int64) *WebSocketEvent { + copy := ev.Copy() + copy.Sequence = seq + return copy +} + +func (ev *WebSocketEvent) IsValid() bool { + return ev.Event != "" +} + +func (ev *WebSocketEvent) EventType() string { + return ev.Event +} + +func (ev *WebSocketEvent) ToJson() string { + if ev.precomputedJSON != nil { + return fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, ev.precomputedJSON.Event, ev.precomputedJSON.Data, ev.precomputedJSON.Broadcast, ev.Sequence) + } + b, _ := json.Marshal(webSocketEventJSON{ + ev.Event, + ev.Data, + ev.Broadcast, + ev.Sequence, + }) + return string(b) +} + +// Encode encodes the event to the given encoder. +func (ev *WebSocketEvent) Encode(enc *json.Encoder) error { + if ev.precomputedJSON != nil { + return enc.Encode(json.RawMessage( + fmt.Sprintf(`{"event": %s, "data": %s, "broadcast": %s, "seq": %d}`, ev.precomputedJSON.Event, ev.precomputedJSON.Data, ev.precomputedJSON.Broadcast, ev.Sequence), + )) + } + + return enc.Encode(webSocketEventJSON{ + ev.Event, + ev.Data, + ev.Broadcast, + ev.Sequence, + }) +} + +func WebSocketEventFromJson(data io.Reader) *WebSocketEvent { + var ev WebSocketEvent + var o webSocketEventJSON + if err := json.NewDecoder(data).Decode(&o); err != nil { + return nil + } + ev.Event = o.Event + if u, ok := o.Data["user"]; ok { + // We need to convert to and from JSON again + // because the user is in the form of a map[string]interface{}. + buf, err := json.Marshal(u) + if err != nil { + return nil + } + o.Data["user"] = UserFromJson(bytes.NewReader(buf)) + } + ev.Data = o.Data + ev.Broadcast = o.Broadcast + ev.Sequence = o.Sequence + return &ev +} + +// WebSocketResponse represents a response received through the WebSocket +// for a request made to the server. 
This is available through the ResponseChannel +// channel in WebSocketClient. +type WebSocketResponse struct { + Status string `json:"status"` // The status of the response. For example: OK, FAIL. + SeqReply int64 `json:"seq_reply,omitempty"` // A counter which is incremented for every response sent. + Data map[string]interface{} `json:"data,omitempty"` // The data contained in the response. + Error *AppError `json:"error,omitempty"` // A field that is set if any error has occurred. +} + +func (m *WebSocketResponse) Add(key string, value interface{}) { + m.Data[key] = value +} + +func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse { + return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data} +} + +func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse { + return &WebSocketResponse{Status: STATUS_FAIL, SeqReply: seqReply, Error: err} +} + +func (m *WebSocketResponse) IsValid() bool { + return m.Status != "" +} + +func (m *WebSocketResponse) EventType() string { + return WEBSOCKET_EVENT_RESPONSE +} + +func (m *WebSocketResponse) ToJson() string { + b, _ := json.Marshal(m) + return string(b) +} + +func WebSocketResponseFromJson(data io.Reader) *WebSocketResponse { + var o *WebSocketResponse + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_request.go similarity index 52% rename from vendor/github.com/mattermost/mattermost-server/model/websocket_request.go rename to vendor/github.com/mattermost/mattermost-server/v5/model/websocket_request.go index aa6bd80..6628a5c 100644 --- a/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_request.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package model @@ -10,11 +10,12 @@ import ( goi18n "github.com/mattermost/go-i18n/i18n" ) +// WebSocketRequest represents a request made to the server through a websocket. type WebSocketRequest struct { // Client-provided fields - Seq int64 `json:"seq"` - Action string `json:"action"` - Data map[string]interface{} `json:"data"` + Seq int64 `json:"seq"` // A counter which is incremented for every request made. + Action string `json:"action"` // The action to perform for a request. For example: get_statuses, user_typing. + Data map[string]interface{} `json:"data"` // The metadata for an action. // Server-provided fields Session Session `json:"-"` diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/api.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/api.go similarity index 65% rename from vendor/github.com/mattermost/mattermost-server/plugin/api.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/api.go index 8b652d3..8a5f58d 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/api.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/api.go @@ -1,11 +1,15 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. 
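Tying the pieces together, the WebSocketRequest and WebSocketResponse types above correlate a request's Seq with the reply's SeqReply. A hypothetical round trip over the client follows; the URL and token are placeholders and event draining is elided:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	client, appErr := model.NewWebSocketClient4("ws://localhost:8065", "session-token")
	if appErr != nil {
		panic(appErr)
	}
	defer client.Close()
	client.Listen()

	sentSeq := client.Sequence // the Seq that the next SendMessage will attach
	client.GetStatuses()       // sends a "get_statuses" WebSocketRequest

	for resp := range client.ResponseChannel {
		if resp.SeqReply == sentSeq {
			fmt.Println("status:", resp.Status, "data:", resp.Data)
			break
		}
	}
}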
package plugin import ( + "io" + "net/http" + plugin "github.com/hashicorp/go-plugin" - "github.com/mattermost/mattermost-server/model" + + "github.com/mattermost/mattermost-server/v5/model" ) // The API can be used to retrieve data or perform actions on behalf of the plugin. Most methods @@ -16,126 +20,226 @@ import ( type API interface { // LoadPluginConfiguration loads the plugin's configuration. dest should be a pointer to a // struct that the configuration JSON can be unmarshalled to. + // + // @tag Plugin + // Minimum server version: 5.2 LoadPluginConfiguration(dest interface{}) error // RegisterCommand registers a custom slash command. When the command is triggered, your plugin // can fulfill it via the ExecuteCommand hook. + // + // @tag Command + // Minimum server version: 5.2 RegisterCommand(command *model.Command) error // UnregisterCommand unregisters a command previously registered via RegisterCommand. + // + // @tag Command + // Minimum server version: 5.2 UnregisterCommand(teamId, trigger string) error + // ExecuteSlashCommand executes a slash command with the given parameters. + // + // @tag Command + // Minimum server version: 5.26 + ExecuteSlashCommand(commandArgs *model.CommandArgs) (*model.CommandResponse, error) + // GetSession returns the session object for the Session ID + // + // Minimum server version: 5.2 GetSession(sessionId string) (*model.Session, *model.AppError) // GetConfig fetches the currently persisted config + // + // @tag Configuration + // Minimum server version: 5.2 GetConfig() *model.Config + // GetUnsanitizedConfig fetches the currently persisted config without removing secrets. + // + // @tag Configuration + // Minimum server version: 5.16 + GetUnsanitizedConfig() *model.Config + // SaveConfig sets the given config and persists the changes + // + // @tag Configuration + // Minimum server version: 5.2 SaveConfig(config *model.Config) *model.AppError // GetPluginConfig fetches the currently persisted config of plugin // + // @tag Plugin // Minimum server version: 5.6 GetPluginConfig() map[string]interface{} // SavePluginConfig sets the given config for plugin and persists the changes // + // @tag Plugin // Minimum server version: 5.6 SavePluginConfig(config map[string]interface{}) *model.AppError // GetBundlePath returns the absolute path where the plugin's bundle was unpacked. // + // @tag Plugin // Minimum server version: 5.10 GetBundlePath() (string, error) - // GetLicense returns the current license used by the Mattermost server. Returns nil if the + // GetLicense returns the current license used by the Mattermost server. Returns nil if // the server does not have a license. // + // @tag Server // Minimum server version: 5.10 GetLicense() *model.License // GetServerVersion return the current Mattermost server version // + // @tag Server // Minimum server version: 5.4 GetServerVersion() string // GetSystemInstallDate returns the time that Mattermost was first installed and ran. // + // @tag Server // Minimum server version: 5.10 GetSystemInstallDate() (int64, *model.AppError) // GetDiagnosticId returns a unique identifier used by the server for diagnostic reports. // + // @tag Server // Minimum server version: 5.10 GetDiagnosticId() string + // GetTelemetryId returns a unique identifier used by the server for telemetry reports. + // + // @tag Server + // Minimum server version: 5.28 + GetTelemetryId() string + // CreateUser creates a user. 
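To ground the interface so far, here is a hypothetical plugin skeleton that calls LoadPluginConfiguration and RegisterCommand from its OnActivate hook, assuming the plugin.MattermostPlugin embedding and plugin.ClientMain entry point this package provides (not shown in this hunk); the configuration struct and the /hello trigger are illustrative:

package main

import (
	"github.com/mattermost/mattermost-server/v5/model"
	"github.com/mattermost/mattermost-server/v5/plugin"
)

// configuration mirrors the plugin's settings schema; the field is illustrative.
type configuration struct {
	Greeting string
}

// Plugin embeds plugin.MattermostPlugin, which provides the API field used below.
type Plugin struct {
	plugin.MattermostPlugin
	config configuration
}

// OnActivate loads the plugin configuration and registers a /hello slash command.
func (p *Plugin) OnActivate() error {
	if err := p.API.LoadPluginConfiguration(&p.config); err != nil {
		return err
	}
	return p.API.RegisterCommand(&model.Command{
		Trigger:          "hello",
		AutoComplete:     true,
		AutoCompleteDesc: "Say hello",
	})
}

func main() {
	plugin.ClientMain(&Plugin{})
}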
+ // + // @tag User + // Minimum server version: 5.2 CreateUser(user *model.User) (*model.User, *model.AppError) // DeleteUser deletes a user. + // + // @tag User + // Minimum server version: 5.2 DeleteUser(userId string) *model.AppError // GetUsers a list of users based on search options. // + // @tag User // Minimum server version: 5.10 GetUsers(options *model.UserGetOptions) ([]*model.User, *model.AppError) // GetUser gets a user. + // + // @tag User + // Minimum server version: 5.2 GetUser(userId string) (*model.User, *model.AppError) // GetUserByEmail gets a user by their email address. + // + // @tag User + // Minimum server version: 5.2 GetUserByEmail(email string) (*model.User, *model.AppError) // GetUserByUsername gets a user by their username. + // + // @tag User + // Minimum server version: 5.2 GetUserByUsername(name string) (*model.User, *model.AppError) // GetUsersByUsernames gets users by their usernames. // + // @tag User // Minimum server version: 5.6 GetUsersByUsernames(usernames []string) ([]*model.User, *model.AppError) // GetUsersInTeam gets users in team. // + // @tag User + // @tag Team // Minimum server version: 5.6 GetUsersInTeam(teamId string, page int, perPage int) ([]*model.User, *model.AppError) + // GetPreferencesForUser gets a user's preferences. + // + // @tag User + // @tag Preference + // Minimum server version: 5.26 + GetPreferencesForUser(userId string) ([]model.Preference, *model.AppError) + + // UpdatePreferencesForUser updates a user's preferences. + // + // @tag User + // @tag Preference + // Minimum server version: 5.26 + UpdatePreferencesForUser(userId string, preferences []model.Preference) *model.AppError + + // DeletePreferencesForUser deletes a user's preferences. + // + // @tag User + // @tag Preference + // Minimum server version: 5.26 + DeletePreferencesForUser(userId string, preferences []model.Preference) *model.AppError + // GetTeamIcon gets the team icon. // + // @tag Team // Minimum server version: 5.6 GetTeamIcon(teamId string) ([]byte, *model.AppError) // SetTeamIcon sets the team icon. // + // @tag Team // Minimum server version: 5.6 SetTeamIcon(teamId string, data []byte) *model.AppError // RemoveTeamIcon removes the team icon. // + // @tag Team // Minimum server version: 5.6 RemoveTeamIcon(teamId string) *model.AppError // UpdateUser updates a user. + // + // @tag User + // Minimum server version: 5.2 UpdateUser(user *model.User) (*model.User, *model.AppError) // GetUserStatus will get a user's status. + // + // @tag User + // Minimum server version: 5.2 GetUserStatus(userId string) (*model.Status, *model.AppError) // GetUserStatusesByIds will return a list of user statuses based on the provided slice of user IDs. + // + // @tag User + // Minimum server version: 5.2 GetUserStatusesByIds(userIds []string) ([]*model.Status, *model.AppError) // UpdateUserStatus will set a user's status until the user, or another integration/plugin, sets it back to online. // The status parameter can be: "online", "away", "dnd", or "offline". + // + // @tag User + // Minimum server version: 5.2 UpdateUserStatus(userId, status string) (*model.Status, *model.AppError) // UpdateUserActive deactivates or reactivates an user. // + // @tag User // Minimum server version: 5.8 UpdateUserActive(userId string, active bool) *model.AppError // GetUsersInChannel returns a page of users in a channel. Page counting starts at 0. // The sortBy parameter can be: "username" or "status". 
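Building on the hypothetical Plugin type from the previous sketch, two helpers that exercise the preference methods above; the category and name are illustrative:

// markOnboarded records that a user finished onboarding.
func (p *Plugin) markOnboarded(userID string) *model.AppError {
	return p.API.UpdatePreferencesForUser(userID, []model.Preference{{
		UserId:   userID,
		Category: "myplugin_onboarding",
		Name:     "completed",
		Value:    "true",
	}})
}

// hasOnboarded reads the preference back and reports whether it was set.
func (p *Plugin) hasOnboarded(userID string) (bool, *model.AppError) {
	prefs, appErr := p.API.GetPreferencesForUser(userID)
	if appErr != nil {
		return false, appErr
	}
	for _, pref := range prefs {
		if pref.Category == "myplugin_onboarding" && pref.Name == "completed" {
			return pref.Value == "true", nil
		}
	}
	return false, nil
}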
// + // @tag User + // @tag Channel // Minimum server version: 5.6 GetUsersInChannel(channelId, sortBy string, page, perPage int) ([]*model.User, *model.AppError) @@ -144,224 +248,416 @@ type API interface { // Returns a map with attribute names as keys and the user's attributes as values. // Requires an enterprise license, LDAP to be configured and for the user to use LDAP as an authentication method. // + // @tag User // Minimum server version: 5.3 GetLDAPUserAttributes(userId string, attributes []string) (map[string]string, *model.AppError) // CreateTeam creates a team. + // + // @tag Team + // Minimum server version: 5.2 CreateTeam(team *model.Team) (*model.Team, *model.AppError) // DeleteTeam deletes a team. + // + // @tag Team + // Minimum server version: 5.2 DeleteTeam(teamId string) *model.AppError // GetTeam gets all teams. + // + // @tag Team + // Minimum server version: 5.2 GetTeams() ([]*model.Team, *model.AppError) // GetTeam gets a team. + // + // @tag Team + // Minimum server version: 5.2 GetTeam(teamId string) (*model.Team, *model.AppError) // GetTeamByName gets a team by its name. + // + // @tag Team + // Minimum server version: 5.2 GetTeamByName(name string) (*model.Team, *model.AppError) // GetTeamsUnreadForUser gets the unread message and mention counts for each team to which the given user belongs. // + // @tag Team + // @tag User // Minimum server version: 5.6 GetTeamsUnreadForUser(userId string) ([]*model.TeamUnread, *model.AppError) // UpdateTeam updates a team. + // + // @tag Team + // Minimum server version: 5.2 UpdateTeam(team *model.Team) (*model.Team, *model.AppError) // SearchTeams search a team. // + // @tag Team // Minimum server version: 5.8 SearchTeams(term string) ([]*model.Team, *model.AppError) // GetTeamsForUser returns list of teams of given user ID. // + // @tag Team + // @tag User // Minimum server version: 5.6 GetTeamsForUser(userId string) ([]*model.Team, *model.AppError) // CreateTeamMember creates a team membership. + // + // @tag Team + // @tag User + // Minimum server version: 5.2 CreateTeamMember(teamId, userId string) (*model.TeamMember, *model.AppError) - // CreateTeamMember creates a team membership for all provided user ids. + // CreateTeamMembers creates a team membership for all provided user ids. + // + // @tag Team + // @tag User + // Minimum server version: 5.2 CreateTeamMembers(teamId string, userIds []string, requestorId string) ([]*model.TeamMember, *model.AppError) + // CreateTeamMembersGracefully creates a team membership for all provided user ids and reports the users that were not added. + // + // @tag Team + // @tag User + // Minimum server version: 5.20 + CreateTeamMembersGracefully(teamId string, userIds []string, requestorId string) ([]*model.TeamMemberWithError, *model.AppError) + // DeleteTeamMember deletes a team membership. + // + // @tag Team + // @tag User + // Minimum server version: 5.2 DeleteTeamMember(teamId, userId, requestorId string) *model.AppError // GetTeamMembers returns the memberships of a specific team. + // + // @tag Team + // @tag User + // Minimum server version: 5.2 GetTeamMembers(teamId string, page, perPage int) ([]*model.TeamMember, *model.AppError) // GetTeamMember returns a specific membership. + // + // @tag Team + // @tag User + // Minimum server version: 5.2 GetTeamMember(teamId, userId string) (*model.TeamMember, *model.AppError) // GetTeamMembersForUser returns all team memberships for a user. 
// + // @tag Team + // @tag User // Minimum server version: 5.10 GetTeamMembersForUser(userId string, page int, perPage int) ([]*model.TeamMember, *model.AppError) // UpdateTeamMemberRoles updates the role for a team membership. + // + // @tag Team + // @tag User + // Minimum server version: 5.2 UpdateTeamMemberRoles(teamId, userId, newRoles string) (*model.TeamMember, *model.AppError) // CreateChannel creates a channel. + // + // @tag Channel + // Minimum server version: 5.2 CreateChannel(channel *model.Channel) (*model.Channel, *model.AppError) // DeleteChannel deletes a channel. + // + // @tag Channel + // Minimum server version: 5.2 DeleteChannel(channelId string) *model.AppError // GetPublicChannelsForTeam gets a list of all channels. + // + // @tag Channel + // @tag Team + // Minimum server version: 5.2 GetPublicChannelsForTeam(teamId string, page, perPage int) ([]*model.Channel, *model.AppError) // GetChannel gets a channel. + // + // @tag Channel + // Minimum server version: 5.2 GetChannel(channelId string) (*model.Channel, *model.AppError) // GetChannelByName gets a channel by its name, given a team id. + // + // @tag Channel + // Minimum server version: 5.2 GetChannelByName(teamId, name string, includeDeleted bool) (*model.Channel, *model.AppError) // GetChannelByNameForTeamName gets a channel by its name, given a team name. + // + // @tag Channel + // @tag Team + // Minimum server version: 5.2 GetChannelByNameForTeamName(teamName, channelName string, includeDeleted bool) (*model.Channel, *model.AppError) // GetChannelsForTeamForUser gets a list of channels for given user ID in given team ID. // + // @tag Channel + // @tag Team + // @tag User // Minimum server version: 5.6 GetChannelsForTeamForUser(teamId, userId string, includeDeleted bool) ([]*model.Channel, *model.AppError) // GetChannelStats gets statistics for a channel. // + // @tag Channel // Minimum server version: 5.6 GetChannelStats(channelId string) (*model.ChannelStats, *model.AppError) // GetDirectChannel gets a direct message channel. // If the channel does not exist it will create it. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 GetDirectChannel(userId1, userId2 string) (*model.Channel, *model.AppError) // GetGroupChannel gets a group message channel. // If the channel does not exist it will create it. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 GetGroupChannel(userIds []string) (*model.Channel, *model.AppError) // UpdateChannel updates a channel. + // + // @tag Channel + // Minimum server version: 5.2 UpdateChannel(channel *model.Channel) (*model.Channel, *model.AppError) // SearchChannels returns the channels on a team matching the provided search term. // + // @tag Channel // Minimum server version: 5.6 SearchChannels(teamId string, term string) ([]*model.Channel, *model.AppError) // SearchUsers returns a list of users based on some search criteria. // + // @tag User // Minimum server version: 5.6 SearchUsers(search *model.UserSearch) ([]*model.User, *model.AppError) // SearchPostsInTeam returns a list of posts in a specific team that match the given params. // + // @tag Post + // @tag Team // Minimum server version: 5.10 SearchPostsInTeam(teamId string, paramsList []*model.SearchParams) ([]*model.Post, *model.AppError) - // AddChannelMember creates a channel membership for a user. + // SearchPostsInTeamForUser returns a list of posts by team and user that match the given + // search parameters. 
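Again building on the hypothetical Plugin type, a small lookup helper using GetTeamByName and GetChannelByName from above; the team and channel names are placeholders:

// findTownSquare resolves a well-known channel in a well-known team.
func (p *Plugin) findTownSquare() (*model.Channel, *model.AppError) {
	team, appErr := p.API.GetTeamByName("engineering")
	if appErr != nil {
		return nil, appErr
	}
	// The final argument excludes soft-deleted channels.
	return p.API.GetChannelByName(team.Id, "town-square", false)
}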
+ // @tag Post + // Minimum server version: 5.26 + SearchPostsInTeamForUser(teamId string, userId string, searchParams model.SearchParameter) (*model.PostSearchResults, *model.AppError) + + // AddChannelMember joins a user to a channel (as if they joined themselves) + // This means the user will not receive notifications for joining the channel. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 AddChannelMember(channelId, userId string) (*model.ChannelMember, *model.AppError) + // AddUserToChannel adds a user to a channel as if the specified user had invited them. + // This means the user will receive the regular notifications for being added to the channel. + // + // @tag User + // @tag Channel + // Minimum server version: 5.18 + AddUserToChannel(channelId, userId, asUserId string) (*model.ChannelMember, *model.AppError) + // GetChannelMember gets a channel membership for a user. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 GetChannelMember(channelId, userId string) (*model.ChannelMember, *model.AppError) // GetChannelMembers gets a channel membership for all users. // + // @tag Channel + // @tag User // Minimum server version: 5.6 GetChannelMembers(channelId string, page, perPage int) (*model.ChannelMembers, *model.AppError) // GetChannelMembersByIds gets a channel membership for a particular User // + // @tag Channel + // @tag User // Minimum server version: 5.6 GetChannelMembersByIds(channelId string, userIds []string) (*model.ChannelMembers, *model.AppError) // GetChannelMembersForUser returns all channel memberships on a team for a user. // + // @tag Channel + // @tag User // Minimum server version: 5.10 GetChannelMembersForUser(teamId, userId string, page, perPage int) ([]*model.ChannelMember, *model.AppError) // UpdateChannelMemberRoles updates a user's roles for a channel. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 UpdateChannelMemberRoles(channelId, userId, newRoles string) (*model.ChannelMember, *model.AppError) // UpdateChannelMemberNotifications updates a user's notification properties for a channel. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 UpdateChannelMemberNotifications(channelId, userId string, notifications map[string]string) (*model.ChannelMember, *model.AppError) + // GetGroup gets a group by ID. + // + // @tag Group + // Minimum server version: 5.18 + GetGroup(groupId string) (*model.Group, *model.AppError) + + // GetGroupByName gets a group by name. + // + // @tag Group + // Minimum server version: 5.18 + GetGroupByName(name string) (*model.Group, *model.AppError) + + // GetGroupsForUser gets the groups a user is in. + // + // @tag Group + // @tag User + // Minimum server version: 5.18 + GetGroupsForUser(userId string) ([]*model.Group, *model.AppError) + // DeleteChannelMember deletes a channel membership for a user. + // + // @tag Channel + // @tag User + // Minimum server version: 5.2 DeleteChannelMember(channelId, userId string) *model.AppError // CreatePost creates a post. + // + // @tag Post + // Minimum server version: 5.2 CreatePost(post *model.Post) (*model.Post, *model.AppError) // AddReaction add a reaction to a post. // + // @tag Post // Minimum server version: 5.3 AddReaction(reaction *model.Reaction) (*model.Reaction, *model.AppError) // RemoveReaction remove a reaction from a post. // + // @tag Post // Minimum server version: 5.3 RemoveReaction(reaction *model.Reaction) *model.AppError // GetReaction get the reactions of a post. 
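A hypothetical helper on the same Plugin type that exercises CreatePost and AddReaction from above; the bot user ID, channel ID and message are placeholders:

// postAndReact creates a post as botUserID in channelID and reacts to it with +1.
func (p *Plugin) postAndReact(botUserID, channelID string) *model.AppError {
	post, appErr := p.API.CreatePost(&model.Post{
		UserId:    botUserID,
		ChannelId: channelID,
		Message:   "Deployment finished",
	})
	if appErr != nil {
		return appErr
	}
	_, appErr = p.API.AddReaction(&model.Reaction{
		UserId:    botUserID,
		PostId:    post.Id,
		EmojiName: "+1",
	})
	return appErr
}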
// + // @tag Post // Minimum server version: 5.3 GetReactions(postId string) ([]*model.Reaction, *model.AppError) // SendEphemeralPost creates an ephemeral post. + // + // @tag Post + // Minimum server version: 5.2 SendEphemeralPost(userId string, post *model.Post) *model.Post // UpdateEphemeralPost updates an ephemeral message previously sent to the user. // EXPERIMENTAL: This API is experimental and can be changed without advance notice. + // + // @tag Post + // Minimum server version: 5.2 UpdateEphemeralPost(userId string, post *model.Post) *model.Post // DeleteEphemeralPost deletes an ephemeral message previously sent to the user. // EXPERIMENTAL: This API is experimental and can be changed without advance notice. + // + // @tag Post + // Minimum server version: 5.2 DeleteEphemeralPost(userId, postId string) // DeletePost deletes a post. + // + // @tag Post + // Minimum server version: 5.2 DeletePost(postId string) *model.AppError // GetPostThread gets a post with all the other posts in the same thread. // + // @tag Post // Minimum server version: 5.6 GetPostThread(postId string) (*model.PostList, *model.AppError) // GetPost gets a post. + // + // @tag Post + // Minimum server version: 5.2 GetPost(postId string) (*model.Post, *model.AppError) // GetPostsSince gets posts created after a specified time as Unix time in milliseconds. // + // @tag Post + // @tag Channel // Minimum server version: 5.6 GetPostsSince(channelId string, time int64) (*model.PostList, *model.AppError) // GetPostsAfter gets a page of posts that were posted after the post provided. // + // @tag Post + // @tag Channel // Minimum server version: 5.6 GetPostsAfter(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) // GetPostsBefore gets a page of posts that were posted before the post provided. // + // @tag Post + // @tag Channel // Minimum server version: 5.6 GetPostsBefore(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) // GetPostsForChannel gets a list of posts for a channel. // + // @tag Post + // @tag Channel // Minimum server version: 5.6 GetPostsForChannel(channelId string, page, perPage int) (*model.PostList, *model.AppError) // GetTeamStats gets a team's statistics // + // @tag Team // Minimum server version: 5.8 GetTeamStats(teamId string) (*model.TeamStats, *model.AppError) // UpdatePost updates a post. + // + // @tag Post + // Minimum server version: 5.2 UpdatePost(post *model.Post) (*model.Post, *model.AppError) // GetProfileImage gets user's profile image. // + // @tag User // Minimum server version: 5.6 GetProfileImage(userId string) ([]byte, *model.AppError) // SetProfileImage sets a user's profile image. // + // @tag User // Minimum server version: 5.6 SetProfileImage(userId string, data []byte) *model.AppError @@ -369,16 +665,19 @@ type API interface { // // The sortBy parameter can be: "name". // + // @tag Emoji // Minimum server version: 5.6 GetEmojiList(sortBy string, page, perPage int) ([]*model.Emoji, *model.AppError) // GetEmojiByName gets an emoji by it's name. // + // @tag Emoji // Minimum server version: 5.6 GetEmojiByName(name string) (*model.Emoji, *model.AppError) // GetEmoji returns a custom emoji based on the emojiId string. // + // @tag Emoji // Minimum server version: 5.6 GetEmoji(emojiId string) (*model.Emoji, *model.AppError) @@ -388,35 +687,52 @@ type API interface { // The duplicate FileInfo objects are not initially linked to a post, but may now be passed // to CreatePost. 
Use this API to duplicate a post and its file attachments without // actually duplicating the uploaded files. + // + // @tag File + // @tag User + // Minimum server version: 5.2 CopyFileInfos(userId string, fileIds []string) ([]string, *model.AppError) // GetFileInfo gets a File Info for a specific fileId // + // @tag File // Minimum server version: 5.3 GetFileInfo(fileId string) (*model.FileInfo, *model.AppError) + // GetFileInfos gets File Infos with options + // + // @tag File + // Minimum server version: 5.22 + GetFileInfos(page, perPage int, opt *model.GetFileInfosOptions) ([]*model.FileInfo, *model.AppError) + // GetFile gets content of a file by it's ID // - // Minimum Server version: 5.8 + // @tag File + // Minimum server version: 5.8 GetFile(fileId string) ([]byte, *model.AppError) // GetFileLink gets the public link to a file by fileId. // + // @tag File // Minimum server version: 5.6 GetFileLink(fileId string) (string, *model.AppError) - // ReadFileAtPath reads the file from the backend for a specific path + // ReadFile reads the file from the backend for a specific path // + // @tag File // Minimum server version: 5.3 ReadFile(path string) ([]byte, *model.AppError) // GetEmojiImage returns the emoji image. // + // @tag Emoji // Minimum server version: 5.6 GetEmojiImage(emojiId string) ([]byte, string, *model.AppError) // UploadFile will upload a file to a channel using a multipart request, to be later attached to a post. // + // @tag File + // @tag Channel // Minimum server version: 5.6 UploadFile(data []byte, channelId string, filename string) (*model.FileInfo, *model.AppError) @@ -431,33 +747,48 @@ type API interface { // GetPlugins will return a list of plugin manifests for currently active plugins. // + // @tag Plugin // Minimum server version: 5.6 GetPlugins() ([]*model.Manifest, *model.AppError) // EnablePlugin will enable an plugin installed. // + // @tag Plugin // Minimum server version: 5.6 EnablePlugin(id string) *model.AppError // DisablePlugin will disable an enabled plugin. // + // @tag Plugin // Minimum server version: 5.6 DisablePlugin(id string) *model.AppError // RemovePlugin will disable and delete a plugin. // + // @tag Plugin // Minimum server version: 5.6 RemovePlugin(id string) *model.AppError // GetPluginStatus will return the status of a plugin. // + // @tag Plugin // Minimum server version: 5.6 GetPluginStatus(id string) (*model.PluginStatus, *model.AppError) + // InstallPlugin will upload another plugin with tar.gz file. + // Previous version will be replaced on replace true. + // + // @tag Plugin + // Minimum server version: 5.18 + InstallPlugin(file io.Reader, replace bool) (*model.Manifest, *model.AppError) + // KV Store Section // KVSet stores a key-value pair, unique per plugin. // Provided helper functions and internal plugin code will use the prefix `mmi_` before keys. Do not use this prefix. + // + // @tag KeyValueStore + // Minimum server version: 5.2 KVSet(key string, value []byte) *model.AppError // KVCompareAndSet updates a key-value pair, unique per plugin, but only if the current value matches the given oldValue. 
@@ -466,27 +797,54 @@ type API interface { // Returns (false, nil) if current value != oldValue or key already exists when inserting // Returns (true, nil) if current value == oldValue or new key is inserted // + // @tag KeyValueStore // Minimum server version: 5.12 KVCompareAndSet(key string, oldValue, newValue []byte) (bool, *model.AppError) + // KVCompareAndDelete deletes a key-value pair, unique per plugin, but only if the current value matches the given oldValue. + // Returns (false, err) if DB error occurred + // Returns (false, nil) if current value != oldValue or key does not exist when deleting + // Returns (true, nil) if current value == oldValue and the key was deleted + // + // @tag KeyValueStore + // Minimum server version: 5.16 + KVCompareAndDelete(key string, oldValue []byte) (bool, *model.AppError) + + // KVSetWithOptions stores a key-value pair, unique per plugin, according to the given options. + // Returns (false, err) if DB error occurred + // Returns (false, nil) if the value was not set + // Returns (true, nil) if the value was set + // + // Minimum server version: 5.20 + KVSetWithOptions(key string, value []byte, options model.PluginKVSetOptions) (bool, *model.AppError) + // KVSet stores a key-value pair with an expiry time, unique per plugin. // + // @tag KeyValueStore // Minimum server version: 5.6 KVSetWithExpiry(key string, value []byte, expireInSeconds int64) *model.AppError // KVGet retrieves a value based on the key, unique per plugin. Returns nil for non-existent keys. + // + // @tag KeyValueStore + // Minimum server version: 5.2 KVGet(key string) ([]byte, *model.AppError) // KVDelete removes a key-value pair, unique per plugin. Returns nil for non-existent keys. + // + // @tag KeyValueStore + // Minimum server version: 5.2 KVDelete(key string) *model.AppError // KVDeleteAll removes all key-value pairs for a plugin. // + // @tag KeyValueStore // Minimum server version: 5.6 KVDeleteAll() *model.AppError // KVList lists all keys for a plugin. // + // @tag KeyValueStore // Minimum server version: 5.6 KVList(page, perPage int) ([]string, *model.AppError) @@ -494,45 +852,60 @@ type API interface { // event is the type and will be prepended with "custom__". // payload is the data sent with the event. Interface values must be primitive Go types or mattermost-server/model types. // broadcast determines to which users to send the event. + // + // Minimum server version: 5.2 PublishWebSocketEvent(event string, payload map[string]interface{}, broadcast *model.WebsocketBroadcast) // HasPermissionTo check if the user has the permission at system scope. // + // @tag User // Minimum server version: 5.3 HasPermissionTo(userId string, permission *model.Permission) bool // HasPermissionToTeam check if the user has the permission at team scope. // + // @tag User + // @tag Team // Minimum server version: 5.3 HasPermissionToTeam(userId, teamId string, permission *model.Permission) bool // HasPermissionToChannel check if the user has the permission at channel scope. // + // @tag User + // @tag Channel // Minimum server version: 5.3 HasPermissionToChannel(userId, channelId string, permission *model.Permission) bool // LogDebug writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. 
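Two hypothetical helpers on the same Plugin type for the KV store methods above; the key names and values are illustrative, and the nil oldValue passed to KVCompareAndSet relies on the insert semantics documented above:

// rememberLastRun stores a timestamp under a plugin-scoped key with a 24 hour
// expiry and reads it back.
func (p *Plugin) rememberLastRun(now string) (string, *model.AppError) {
	if appErr := p.API.KVSetWithExpiry("last_run", []byte(now), 24*60*60); appErr != nil {
		return "", appErr
	}
	val, appErr := p.API.KVGet("last_run")
	if appErr != nil {
		return "", appErr
	}
	return string(val), nil
}

// claimLock inserts a marker only if the key does not exist yet.
func (p *Plugin) claimLock() (bool, *model.AppError) {
	// A nil oldValue asks for an insert: (true, nil) only if the key was absent.
	return p.API.KVCompareAndSet("job_lock", nil, []byte("locked"))
}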
- // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // @tag Logging + // Minimum server version: 5.2 LogDebug(msg string, keyValuePairs ...interface{}) // LogInfo writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. - // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // @tag Logging + // Minimum server version: 5.2 LogInfo(msg string, keyValuePairs ...interface{}) // LogError writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. - // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // @tag Logging + // Minimum server version: 5.2 LogError(msg string, keyValuePairs ...interface{}) // LogWarn writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. - // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // @tag Logging + // Minimum server version: 5.2 LogWarn(msg string, keyValuePairs ...interface{}) // SendMail sends an email to a specific address @@ -542,49 +915,129 @@ type API interface { // CreateBot creates the given bot and corresponding user. // + // @tag Bot // Minimum server version: 5.10 CreateBot(bot *model.Bot) (*model.Bot, *model.AppError) // PatchBot applies the given patch to the bot and corresponding user. // + // @tag Bot // Minimum server version: 5.10 PatchBot(botUserId string, botPatch *model.BotPatch) (*model.Bot, *model.AppError) // GetBot returns the given bot. // + // @tag Bot // Minimum server version: 5.10 GetBot(botUserId string, includeDeleted bool) (*model.Bot, *model.AppError) // GetBots returns the requested page of bots. // + // @tag Bot // Minimum server version: 5.10 GetBots(options *model.BotGetOptions) ([]*model.Bot, *model.AppError) // UpdateBotActive marks a bot as active or inactive, along with its corresponding user. // + // @tag Bot // Minimum server version: 5.10 UpdateBotActive(botUserId string, active bool) (*model.Bot, *model.AppError) // PermanentDeleteBot permanently deletes a bot and its corresponding user. // + // @tag Bot // Minimum server version: 5.10 PermanentDeleteBot(botUserId string) *model.AppError // GetBotIconImage gets LHS bot icon image. // + // @tag Bot // Minimum server version: 5.14 GetBotIconImage(botUserId string) ([]byte, *model.AppError) // SetBotIconImage sets LHS bot icon image. // Icon image must be SVG format, all other formats are rejected. // + // @tag Bot // Minimum server version: 5.14 SetBotIconImage(botUserId string, data []byte) *model.AppError // DeleteBotIconImage deletes LHS bot icon image. // + // @tag Bot // Minimum server version: 5.14 DeleteBotIconImage(botUserId string) *model.AppError + + // PluginHTTP allows inter-plugin requests to plugin APIs. + // + // Minimum server version: 5.18 + PluginHTTP(request *http.Request) *http.Response + + // PublishUserTyping publishes a user is typing WebSocket event. + // The parentId parameter may be an empty string, the other parameters are required. 
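Finally, a hypothetical helper on the same Plugin type that uses CreateBot from above; the username and display name are placeholders, and real plugins usually check whether the bot already exists before creating it:

// ensureBot creates the plugin's bot account and its corresponding user.
func (p *Plugin) ensureBot() (*model.Bot, *model.AppError) {
	return p.API.CreateBot(&model.Bot{
		Username:    "example-bot",
		DisplayName: "Example Bot",
		Description: "Created by the example plugin",
	})
}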
+ // + // @tag User + // Minimum server version: 5.26 + PublishUserTyping(userId, channelId, parentId string) *model.AppError + + // CreateCommand creates a server-owned slash command that is not handled by the plugin + // itself, and which will persist past the life of the plugin. The command will have its + // CreatorId set to "" and its PluginId set to the id of the plugin that created it. + // + // @tag SlashCommand + // Minimum server version: 5.28 + CreateCommand(cmd *model.Command) (*model.Command, error) + + // ListCommands returns the list of all slash commands for teamID. E.g., custom commands + // (those created through the integrations menu, the REST api, or the plugin api CreateCommand), + // plugin commands (those created with plugin api RegisterCommand), and builtin commands + // (those added internally through RegisterCommandProvider). + // + // @tag SlashCommand + // Minimum server version: 5.28 + ListCommands(teamID string) ([]*model.Command, error) + + // ListCustomCommands returns the list of slash commands for teamID that where created + // through the integrations menu, the REST api, or the plugin api CreateCommand. + // + // @tag SlashCommand + // Minimum server version: 5.28 + ListCustomCommands(teamID string) ([]*model.Command, error) + + // ListPluginCommands returns the list of slash commands for teamID that were created + // with the plugin api RegisterCommand. + // + // @tag SlashCommand + // Minimum server version: 5.28 + ListPluginCommands(teamID string) ([]*model.Command, error) + + // ListBuiltInCommands returns the list of slash commands that are builtin commands + // (those added internally through RegisterCommandProvider). + // + // @tag SlashCommand + // Minimum server version: 5.28 + ListBuiltInCommands() ([]*model.Command, error) + + // GetCommand returns the command definition based on a command id string. + // + // @tag SlashCommand + // Minimum server version: 5.28 + GetCommand(commandID string) (*model.Command, error) + + // UpdateCommand updates a single command (commandID) with the information provided in the + // updatedCmd model.Command struct. The following fields in the command cannot be updated: + // Id, Token, CreateAt, DeleteAt, and PluginId. If updatedCmd.TeamId is blank, it + // will be set to commandID's TeamId. + // + // @tag SlashCommand + // Minimum server version: 5.28 + UpdateCommand(commandID string, updatedCmd *model.Command) (*model.Command, error) + + // DeleteCommand deletes a slash command (commandID). + // + // @tag SlashCommand + // Minimum server version: 5.28 + DeleteCommand(commandID string) error } var handshake = plugin.HandshakeConfig{ diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/api_timer_layer_generated.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/api_timer_layer_generated.go new file mode 100644 index 0000000..ddda506 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/api_timer_layer_generated.go @@ -0,0 +1,1101 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +// Code generated by "make pluginapi" +// DO NOT EDIT + +package plugin + +import ( + "io" + "net/http" + timePkg "time" + + "github.com/mattermost/mattermost-server/v5/einterfaces" + "github.com/mattermost/mattermost-server/v5/model" +) + +type apiTimerLayer struct { + pluginID string + apiImpl API + metrics einterfaces.MetricsInterface +} + +func (api *apiTimerLayer) recordTime(startTime timePkg.Time, name string, success bool) { + if api.metrics != nil { + elapsedTime := float64(timePkg.Since(startTime)) / float64(timePkg.Second) + api.metrics.ObservePluginApiDuration(api.pluginID, name, success, elapsedTime) + } +} + +func (api *apiTimerLayer) LoadPluginConfiguration(dest interface{}) error { + startTime := timePkg.Now() + _returnsA := api.apiImpl.LoadPluginConfiguration(dest) + api.recordTime(startTime, "LoadPluginConfiguration", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) RegisterCommand(command *model.Command) error { + startTime := timePkg.Now() + _returnsA := api.apiImpl.RegisterCommand(command) + api.recordTime(startTime, "RegisterCommand", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) UnregisterCommand(teamId, trigger string) error { + startTime := timePkg.Now() + _returnsA := api.apiImpl.UnregisterCommand(teamId, trigger) + api.recordTime(startTime, "UnregisterCommand", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) ExecuteSlashCommand(commandArgs *model.CommandArgs) (*model.CommandResponse, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.ExecuteSlashCommand(commandArgs) + api.recordTime(startTime, "ExecuteSlashCommand", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetSession(sessionId string) (*model.Session, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetSession(sessionId) + api.recordTime(startTime, "GetSession", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetConfig() *model.Config { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetConfig() + api.recordTime(startTime, "GetConfig", true) + return _returnsA +} + +func (api *apiTimerLayer) GetUnsanitizedConfig() *model.Config { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetUnsanitizedConfig() + api.recordTime(startTime, "GetUnsanitizedConfig", true) + return _returnsA +} + +func (api *apiTimerLayer) SaveConfig(config *model.Config) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SaveConfig(config) + api.recordTime(startTime, "SaveConfig", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetPluginConfig() map[string]interface{} { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetPluginConfig() + api.recordTime(startTime, "GetPluginConfig", true) + return _returnsA +} + +func (api *apiTimerLayer) SavePluginConfig(config map[string]interface{}) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SavePluginConfig(config) + api.recordTime(startTime, "SavePluginConfig", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetBundlePath() (string, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetBundlePath() + api.recordTime(startTime, "GetBundlePath", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetLicense() *model.License { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetLicense() + api.recordTime(startTime, "GetLicense", true) + 
return _returnsA +} + +func (api *apiTimerLayer) GetServerVersion() string { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetServerVersion() + api.recordTime(startTime, "GetServerVersion", true) + return _returnsA +} + +func (api *apiTimerLayer) GetSystemInstallDate() (int64, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetSystemInstallDate() + api.recordTime(startTime, "GetSystemInstallDate", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetDiagnosticId() string { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetDiagnosticId() + api.recordTime(startTime, "GetDiagnosticId", true) + return _returnsA +} + +func (api *apiTimerLayer) GetTelemetryId() string { + startTime := timePkg.Now() + _returnsA := api.apiImpl.GetTelemetryId() + api.recordTime(startTime, "GetTelemetryId", true) + return _returnsA +} + +func (api *apiTimerLayer) CreateUser(user *model.User) (*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateUser(user) + api.recordTime(startTime, "CreateUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) DeleteUser(userId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteUser(userId) + api.recordTime(startTime, "DeleteUser", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetUsers(options *model.UserGetOptions) ([]*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUsers(options) + api.recordTime(startTime, "GetUsers", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUser(userId string) (*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUser(userId) + api.recordTime(startTime, "GetUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUserByEmail(email string) (*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUserByEmail(email) + api.recordTime(startTime, "GetUserByEmail", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUserByUsername(name string) (*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUserByUsername(name) + api.recordTime(startTime, "GetUserByUsername", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUsersByUsernames(usernames []string) ([]*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUsersByUsernames(usernames) + api.recordTime(startTime, "GetUsersByUsernames", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUsersInTeam(teamId string, page int, perPage int) ([]*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUsersInTeam(teamId, page, perPage) + api.recordTime(startTime, "GetUsersInTeam", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetPreferencesForUser(userId string) ([]model.Preference, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPreferencesForUser(userId) + api.recordTime(startTime, "GetPreferencesForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdatePreferencesForUser(userId string, preferences []model.Preference) *model.AppError { + 
startTime := timePkg.Now() + _returnsA := api.apiImpl.UpdatePreferencesForUser(userId, preferences) + api.recordTime(startTime, "UpdatePreferencesForUser", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) DeletePreferencesForUser(userId string, preferences []model.Preference) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeletePreferencesForUser(userId, preferences) + api.recordTime(startTime, "DeletePreferencesForUser", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetTeamIcon(teamId string) ([]byte, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamIcon(teamId) + api.recordTime(startTime, "GetTeamIcon", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SetTeamIcon(teamId string, data []byte) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SetTeamIcon(teamId, data) + api.recordTime(startTime, "SetTeamIcon", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) RemoveTeamIcon(teamId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.RemoveTeamIcon(teamId) + api.recordTime(startTime, "RemoveTeamIcon", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) UpdateUser(user *model.User) (*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateUser(user) + api.recordTime(startTime, "UpdateUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUserStatus(userId string) (*model.Status, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUserStatus(userId) + api.recordTime(startTime, "GetUserStatus", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetUserStatusesByIds(userIds []string) ([]*model.Status, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUserStatusesByIds(userIds) + api.recordTime(startTime, "GetUserStatusesByIds", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateUserStatus(userId, status string) (*model.Status, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateUserStatus(userId, status) + api.recordTime(startTime, "UpdateUserStatus", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateUserActive(userId string, active bool) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.UpdateUserActive(userId, active) + api.recordTime(startTime, "UpdateUserActive", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetUsersInChannel(channelId, sortBy string, page, perPage int) ([]*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetUsersInChannel(channelId, sortBy, page, perPage) + api.recordTime(startTime, "GetUsersInChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetLDAPUserAttributes(userId string, attributes []string) (map[string]string, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetLDAPUserAttributes(userId, attributes) + api.recordTime(startTime, "GetLDAPUserAttributes", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) CreateTeam(team *model.Team) (*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateTeam(team) 
+ api.recordTime(startTime, "CreateTeam", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) DeleteTeam(teamId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteTeam(teamId) + api.recordTime(startTime, "DeleteTeam", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetTeams() ([]*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeams() + api.recordTime(startTime, "GetTeams", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeam(teamId string) (*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeam(teamId) + api.recordTime(startTime, "GetTeam", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeamByName(name string) (*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamByName(name) + api.recordTime(startTime, "GetTeamByName", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeamsUnreadForUser(userId string) ([]*model.TeamUnread, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamsUnreadForUser(userId) + api.recordTime(startTime, "GetTeamsUnreadForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateTeam(team *model.Team) (*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateTeam(team) + api.recordTime(startTime, "UpdateTeam", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SearchTeams(term string) ([]*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.SearchTeams(term) + api.recordTime(startTime, "SearchTeams", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeamsForUser(userId string) ([]*model.Team, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamsForUser(userId) + api.recordTime(startTime, "GetTeamsForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) CreateTeamMember(teamId, userId string) (*model.TeamMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateTeamMember(teamId, userId) + api.recordTime(startTime, "CreateTeamMember", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) CreateTeamMembers(teamId string, userIds []string, requestorId string) ([]*model.TeamMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateTeamMembers(teamId, userIds, requestorId) + api.recordTime(startTime, "CreateTeamMembers", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) CreateTeamMembersGracefully(teamId string, userIds []string, requestorId string) ([]*model.TeamMemberWithError, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateTeamMembersGracefully(teamId, userIds, requestorId) + api.recordTime(startTime, "CreateTeamMembersGracefully", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) DeleteTeamMember(teamId, userId, requestorId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteTeamMember(teamId, userId, requestorId) + api.recordTime(startTime, "DeleteTeamMember", _returnsA == nil) + 
return _returnsA +} + +func (api *apiTimerLayer) GetTeamMembers(teamId string, page, perPage int) ([]*model.TeamMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamMembers(teamId, page, perPage) + api.recordTime(startTime, "GetTeamMembers", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeamMember(teamId, userId string) (*model.TeamMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamMember(teamId, userId) + api.recordTime(startTime, "GetTeamMember", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeamMembersForUser(userId string, page int, perPage int) ([]*model.TeamMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamMembersForUser(userId, page, perPage) + api.recordTime(startTime, "GetTeamMembersForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateTeamMemberRoles(teamId, userId, newRoles string) (*model.TeamMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateTeamMemberRoles(teamId, userId, newRoles) + api.recordTime(startTime, "UpdateTeamMemberRoles", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) CreateChannel(channel *model.Channel) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateChannel(channel) + api.recordTime(startTime, "CreateChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) DeleteChannel(channelId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteChannel(channelId) + api.recordTime(startTime, "DeleteChannel", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetPublicChannelsForTeam(teamId string, page, perPage int) ([]*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPublicChannelsForTeam(teamId, page, perPage) + api.recordTime(startTime, "GetPublicChannelsForTeam", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannel(channelId string) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannel(channelId) + api.recordTime(startTime, "GetChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelByName(teamId, name string, includeDeleted bool) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelByName(teamId, name, includeDeleted) + api.recordTime(startTime, "GetChannelByName", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelByNameForTeamName(teamName, channelName string, includeDeleted bool) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelByNameForTeamName(teamName, channelName, includeDeleted) + api.recordTime(startTime, "GetChannelByNameForTeamName", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelsForTeamForUser(teamId, userId string, includeDeleted bool) ([]*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelsForTeamForUser(teamId, userId, includeDeleted) + api.recordTime(startTime, "GetChannelsForTeamForUser", _returnsB == nil) + 
return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelStats(channelId string) (*model.ChannelStats, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelStats(channelId) + api.recordTime(startTime, "GetChannelStats", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetDirectChannel(userId1, userId2 string) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetDirectChannel(userId1, userId2) + api.recordTime(startTime, "GetDirectChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetGroupChannel(userIds []string) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetGroupChannel(userIds) + api.recordTime(startTime, "GetGroupChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateChannel(channel *model.Channel) (*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateChannel(channel) + api.recordTime(startTime, "UpdateChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SearchChannels(teamId string, term string) ([]*model.Channel, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.SearchChannels(teamId, term) + api.recordTime(startTime, "SearchChannels", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SearchUsers(search *model.UserSearch) ([]*model.User, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.SearchUsers(search) + api.recordTime(startTime, "SearchUsers", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SearchPostsInTeam(teamId string, paramsList []*model.SearchParams) ([]*model.Post, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.SearchPostsInTeam(teamId, paramsList) + api.recordTime(startTime, "SearchPostsInTeam", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SearchPostsInTeamForUser(teamId string, userId string, searchParams model.SearchParameter) (*model.PostSearchResults, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.SearchPostsInTeamForUser(teamId, userId, searchParams) + api.recordTime(startTime, "SearchPostsInTeamForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) AddChannelMember(channelId, userId string) (*model.ChannelMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.AddChannelMember(channelId, userId) + api.recordTime(startTime, "AddChannelMember", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) AddUserToChannel(channelId, userId, asUserId string) (*model.ChannelMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.AddUserToChannel(channelId, userId, asUserId) + api.recordTime(startTime, "AddUserToChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelMember(channelId, userId string) (*model.ChannelMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelMember(channelId, userId) + api.recordTime(startTime, "GetChannelMember", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelMembers(channelId 
string, page, perPage int) (*model.ChannelMembers, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelMembers(channelId, page, perPage) + api.recordTime(startTime, "GetChannelMembers", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelMembersByIds(channelId string, userIds []string) (*model.ChannelMembers, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelMembersByIds(channelId, userIds) + api.recordTime(startTime, "GetChannelMembersByIds", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetChannelMembersForUser(teamId, userId string, page, perPage int) ([]*model.ChannelMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetChannelMembersForUser(teamId, userId, page, perPage) + api.recordTime(startTime, "GetChannelMembersForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateChannelMemberRoles(channelId, userId, newRoles string) (*model.ChannelMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateChannelMemberRoles(channelId, userId, newRoles) + api.recordTime(startTime, "UpdateChannelMemberRoles", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateChannelMemberNotifications(channelId, userId string, notifications map[string]string) (*model.ChannelMember, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateChannelMemberNotifications(channelId, userId, notifications) + api.recordTime(startTime, "UpdateChannelMemberNotifications", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetGroup(groupId string) (*model.Group, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetGroup(groupId) + api.recordTime(startTime, "GetGroup", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetGroupByName(name string) (*model.Group, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetGroupByName(name) + api.recordTime(startTime, "GetGroupByName", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetGroupsForUser(userId string) ([]*model.Group, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetGroupsForUser(userId) + api.recordTime(startTime, "GetGroupsForUser", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) DeleteChannelMember(channelId, userId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteChannelMember(channelId, userId) + api.recordTime(startTime, "DeleteChannelMember", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) CreatePost(post *model.Post) (*model.Post, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreatePost(post) + api.recordTime(startTime, "CreatePost", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) AddReaction(reaction *model.Reaction) (*model.Reaction, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.AddReaction(reaction) + api.recordTime(startTime, "AddReaction", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) RemoveReaction(reaction *model.Reaction) *model.AppError { + startTime := timePkg.Now() + 
_returnsA := api.apiImpl.RemoveReaction(reaction) + api.recordTime(startTime, "RemoveReaction", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetReactions(postId string) ([]*model.Reaction, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetReactions(postId) + api.recordTime(startTime, "GetReactions", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SendEphemeralPost(userId string, post *model.Post) *model.Post { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SendEphemeralPost(userId, post) + api.recordTime(startTime, "SendEphemeralPost", true) + return _returnsA +} + +func (api *apiTimerLayer) UpdateEphemeralPost(userId string, post *model.Post) *model.Post { + startTime := timePkg.Now() + _returnsA := api.apiImpl.UpdateEphemeralPost(userId, post) + api.recordTime(startTime, "UpdateEphemeralPost", true) + return _returnsA +} + +func (api *apiTimerLayer) DeleteEphemeralPost(userId, postId string) { + startTime := timePkg.Now() + api.apiImpl.DeleteEphemeralPost(userId, postId) + api.recordTime(startTime, "DeleteEphemeralPost", true) +} + +func (api *apiTimerLayer) DeletePost(postId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeletePost(postId) + api.recordTime(startTime, "DeletePost", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetPostThread(postId string) (*model.PostList, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPostThread(postId) + api.recordTime(startTime, "GetPostThread", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetPost(postId string) (*model.Post, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPost(postId) + api.recordTime(startTime, "GetPost", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetPostsSince(channelId string, time int64) (*model.PostList, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPostsSince(channelId, time) + api.recordTime(startTime, "GetPostsSince", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetPostsAfter(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPostsAfter(channelId, postId, page, perPage) + api.recordTime(startTime, "GetPostsAfter", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetPostsBefore(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPostsBefore(channelId, postId, page, perPage) + api.recordTime(startTime, "GetPostsBefore", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetPostsForChannel(channelId string, page, perPage int) (*model.PostList, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPostsForChannel(channelId, page, perPage) + api.recordTime(startTime, "GetPostsForChannel", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetTeamStats(teamId string) (*model.TeamStats, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetTeamStats(teamId) + api.recordTime(startTime, "GetTeamStats", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) 
UpdatePost(post *model.Post) (*model.Post, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdatePost(post) + api.recordTime(startTime, "UpdatePost", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetProfileImage(userId string) ([]byte, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetProfileImage(userId) + api.recordTime(startTime, "GetProfileImage", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SetProfileImage(userId string, data []byte) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SetProfileImage(userId, data) + api.recordTime(startTime, "SetProfileImage", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetEmojiList(sortBy string, page, perPage int) ([]*model.Emoji, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetEmojiList(sortBy, page, perPage) + api.recordTime(startTime, "GetEmojiList", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetEmojiByName(name string) (*model.Emoji, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetEmojiByName(name) + api.recordTime(startTime, "GetEmojiByName", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetEmoji(emojiId string) (*model.Emoji, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetEmoji(emojiId) + api.recordTime(startTime, "GetEmoji", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) CopyFileInfos(userId string, fileIds []string) ([]string, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CopyFileInfos(userId, fileIds) + api.recordTime(startTime, "CopyFileInfos", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetFileInfo(fileId string) (*model.FileInfo, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetFileInfo(fileId) + api.recordTime(startTime, "GetFileInfo", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetFileInfos(page, perPage int, opt *model.GetFileInfosOptions) ([]*model.FileInfo, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetFileInfos(page, perPage, opt) + api.recordTime(startTime, "GetFileInfos", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetFile(fileId string) ([]byte, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetFile(fileId) + api.recordTime(startTime, "GetFile", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetFileLink(fileId string) (string, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetFileLink(fileId) + api.recordTime(startTime, "GetFileLink", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) ReadFile(path string) ([]byte, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.ReadFile(path) + api.recordTime(startTime, "ReadFile", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetEmojiImage(emojiId string) ([]byte, string, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB, _returnsC := api.apiImpl.GetEmojiImage(emojiId) + api.recordTime(startTime, "GetEmojiImage", _returnsC == 
nil) + return _returnsA, _returnsB, _returnsC +} + +func (api *apiTimerLayer) UploadFile(data []byte, channelId string, filename string) (*model.FileInfo, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UploadFile(data, channelId, filename) + api.recordTime(startTime, "UploadFile", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) OpenInteractiveDialog(dialog model.OpenDialogRequest) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.OpenInteractiveDialog(dialog) + api.recordTime(startTime, "OpenInteractiveDialog", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetPlugins() ([]*model.Manifest, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPlugins() + api.recordTime(startTime, "GetPlugins", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) EnablePlugin(id string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.EnablePlugin(id) + api.recordTime(startTime, "EnablePlugin", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) DisablePlugin(id string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DisablePlugin(id) + api.recordTime(startTime, "DisablePlugin", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) RemovePlugin(id string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.RemovePlugin(id) + api.recordTime(startTime, "RemovePlugin", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetPluginStatus(id string) (*model.PluginStatus, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetPluginStatus(id) + api.recordTime(startTime, "GetPluginStatus", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) InstallPlugin(file io.Reader, replace bool) (*model.Manifest, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.InstallPlugin(file, replace) + api.recordTime(startTime, "InstallPlugin", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) KVSet(key string, value []byte) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.KVSet(key, value) + api.recordTime(startTime, "KVSet", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) KVCompareAndSet(key string, oldValue, newValue []byte) (bool, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.KVCompareAndSet(key, oldValue, newValue) + api.recordTime(startTime, "KVCompareAndSet", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) KVCompareAndDelete(key string, oldValue []byte) (bool, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.KVCompareAndDelete(key, oldValue) + api.recordTime(startTime, "KVCompareAndDelete", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) KVSetWithOptions(key string, value []byte, options model.PluginKVSetOptions) (bool, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.KVSetWithOptions(key, value, options) + api.recordTime(startTime, "KVSetWithOptions", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) KVSetWithExpiry(key string, value []byte, expireInSeconds int64) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.KVSetWithExpiry(key, 
value, expireInSeconds) + api.recordTime(startTime, "KVSetWithExpiry", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) KVGet(key string) ([]byte, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.KVGet(key) + api.recordTime(startTime, "KVGet", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) KVDelete(key string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.KVDelete(key) + api.recordTime(startTime, "KVDelete", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) KVDeleteAll() *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.KVDeleteAll() + api.recordTime(startTime, "KVDeleteAll", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) KVList(page, perPage int) ([]string, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.KVList(page, perPage) + api.recordTime(startTime, "KVList", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) PublishWebSocketEvent(event string, payload map[string]interface{}, broadcast *model.WebsocketBroadcast) { + startTime := timePkg.Now() + api.apiImpl.PublishWebSocketEvent(event, payload, broadcast) + api.recordTime(startTime, "PublishWebSocketEvent", true) +} + +func (api *apiTimerLayer) HasPermissionTo(userId string, permission *model.Permission) bool { + startTime := timePkg.Now() + _returnsA := api.apiImpl.HasPermissionTo(userId, permission) + api.recordTime(startTime, "HasPermissionTo", true) + return _returnsA +} + +func (api *apiTimerLayer) HasPermissionToTeam(userId, teamId string, permission *model.Permission) bool { + startTime := timePkg.Now() + _returnsA := api.apiImpl.HasPermissionToTeam(userId, teamId, permission) + api.recordTime(startTime, "HasPermissionToTeam", true) + return _returnsA +} + +func (api *apiTimerLayer) HasPermissionToChannel(userId, channelId string, permission *model.Permission) bool { + startTime := timePkg.Now() + _returnsA := api.apiImpl.HasPermissionToChannel(userId, channelId, permission) + api.recordTime(startTime, "HasPermissionToChannel", true) + return _returnsA +} + +func (api *apiTimerLayer) LogDebug(msg string, keyValuePairs ...interface{}) { + startTime := timePkg.Now() + api.apiImpl.LogDebug(msg, keyValuePairs...) + api.recordTime(startTime, "LogDebug", true) +} + +func (api *apiTimerLayer) LogInfo(msg string, keyValuePairs ...interface{}) { + startTime := timePkg.Now() + api.apiImpl.LogInfo(msg, keyValuePairs...) + api.recordTime(startTime, "LogInfo", true) +} + +func (api *apiTimerLayer) LogError(msg string, keyValuePairs ...interface{}) { + startTime := timePkg.Now() + api.apiImpl.LogError(msg, keyValuePairs...) + api.recordTime(startTime, "LogError", true) +} + +func (api *apiTimerLayer) LogWarn(msg string, keyValuePairs ...interface{}) { + startTime := timePkg.Now() + api.apiImpl.LogWarn(msg, keyValuePairs...) 
+ api.recordTime(startTime, "LogWarn", true) +} + +func (api *apiTimerLayer) SendMail(to, subject, htmlBody string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SendMail(to, subject, htmlBody) + api.recordTime(startTime, "SendMail", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) CreateBot(bot *model.Bot) (*model.Bot, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateBot(bot) + api.recordTime(startTime, "CreateBot", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) PatchBot(botUserId string, botPatch *model.BotPatch) (*model.Bot, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.PatchBot(botUserId, botPatch) + api.recordTime(startTime, "PatchBot", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetBot(botUserId string, includeDeleted bool) (*model.Bot, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetBot(botUserId, includeDeleted) + api.recordTime(startTime, "GetBot", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetBots(options *model.BotGetOptions) ([]*model.Bot, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetBots(options) + api.recordTime(startTime, "GetBots", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateBotActive(botUserId string, active bool) (*model.Bot, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateBotActive(botUserId, active) + api.recordTime(startTime, "UpdateBotActive", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) PermanentDeleteBot(botUserId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.PermanentDeleteBot(botUserId) + api.recordTime(startTime, "PermanentDeleteBot", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) GetBotIconImage(botUserId string) ([]byte, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetBotIconImage(botUserId) + api.recordTime(startTime, "GetBotIconImage", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) SetBotIconImage(botUserId string, data []byte) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.SetBotIconImage(botUserId, data) + api.recordTime(startTime, "SetBotIconImage", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) DeleteBotIconImage(botUserId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteBotIconImage(botUserId) + api.recordTime(startTime, "DeleteBotIconImage", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) PluginHTTP(request *http.Request) *http.Response { + startTime := timePkg.Now() + _returnsA := api.apiImpl.PluginHTTP(request) + api.recordTime(startTime, "PluginHTTP", true) + return _returnsA +} + +func (api *apiTimerLayer) PublishUserTyping(userId, channelId, parentId string) *model.AppError { + startTime := timePkg.Now() + _returnsA := api.apiImpl.PublishUserTyping(userId, channelId, parentId) + api.recordTime(startTime, "PublishUserTyping", _returnsA == nil) + return _returnsA +} + +func (api *apiTimerLayer) CreateCommand(cmd *model.Command) (*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.CreateCommand(cmd) + api.recordTime(startTime, 
"CreateCommand", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) ListCommands(teamID string) ([]*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.ListCommands(teamID) + api.recordTime(startTime, "ListCommands", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) ListCustomCommands(teamID string) ([]*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.ListCustomCommands(teamID) + api.recordTime(startTime, "ListCustomCommands", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) ListPluginCommands(teamID string) ([]*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.ListPluginCommands(teamID) + api.recordTime(startTime, "ListPluginCommands", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) ListBuiltInCommands() ([]*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.ListBuiltInCommands() + api.recordTime(startTime, "ListBuiltInCommands", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) GetCommand(commandID string) (*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.GetCommand(commandID) + api.recordTime(startTime, "GetCommand", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) UpdateCommand(commandID string, updatedCmd *model.Command) (*model.Command, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := api.apiImpl.UpdateCommand(commandID, updatedCmd) + api.recordTime(startTime, "UpdateCommand", _returnsB == nil) + return _returnsA, _returnsB +} + +func (api *apiTimerLayer) DeleteCommand(commandID string) error { + startTime := timePkg.Now() + _returnsA := api.apiImpl.DeleteCommand(commandID) + api.recordTime(startTime, "DeleteCommand", _returnsA == nil) + return _returnsA +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/client.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/client.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/plugin/client.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/client.go index e445f9e..ed3d872 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/client.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/client.go @@ -8,8 +8,8 @@ import ( ) const ( - INTERNAL_KEY_PREFIX = "mmi_" - BOT_USER_KEY = INTERNAL_KEY_PREFIX + "botid" + InternalKeyPrefix = "mmi_" + BotUserKey = InternalKeyPrefix + "botid" ) // Starts the serving of a Mattermost plugin over net/rpc. gRPC is not yet supported. 
diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/client_rpc.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/client_rpc.go similarity index 64% rename from vendor/github.com/mattermost/mattermost-server/plugin/client_rpc.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/client_rpc.go index a43107c..7d1f5cc 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/client_rpc.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/client_rpc.go @@ -19,9 +19,10 @@ import ( "reflect" "github.com/dyatlov/go-opengraph/opengraph" - plugin "github.com/hashicorp/go-plugin" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" + "github.com/hashicorp/go-plugin" + + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" ) var hookNameToId map[string]int = make(map[string]int) @@ -56,11 +57,13 @@ func (p *hooksPlugin) Client(b *plugin.MuxBroker, client *rpc.Client) (interface } type apiRPCClient struct { - client *rpc.Client + client *rpc.Client + muxBroker *plugin.MuxBroker } type apiRPCServer struct { - impl API + impl API + muxBroker *plugin.MuxBroker } // ErrorString is a fallback for sending unregistered implementations of the error interface across @@ -97,6 +100,9 @@ func init() { gob.Register(&model.AppError{}) gob.Register(&ErrorString{}) gob.Register(&opengraph.OpenGraph{}) + gob.Register(&model.AutocompleteDynamicListArg{}) + gob.Register(&model.AutocompleteStaticListArg{}) + gob.Register(&model.AutocompleteTextArg{}) } // These enforce compile time checks to make sure types implement the interface @@ -106,7 +112,7 @@ var _ plugin.Plugin = &hooksPlugin{} var _ Hooks = &hooksRPCClient{} // -// Below are specal cases for hooks or APIs that can not be auto generated +// Below are special cases for hooks or APIs that can not be auto generated // func (g *hooksRPCClient) Implemented() (impl []string, err error) { @@ -171,7 +177,8 @@ type Z_OnActivateReturns struct { func (g *hooksRPCClient) OnActivate() error { muxId := g.muxBroker.NextId() go g.muxBroker.AcceptAndServe(muxId, &apiRPCServer{ - impl: g.apiImpl, + impl: g.apiImpl, + muxBroker: g.muxBroker, }) _args := &Z_OnActivateArgs{ @@ -192,7 +199,8 @@ func (s *hooksRPCServer) OnActivate(args *Z_OnActivateArgs, returns *Z_OnActivat } s.apiRPCClient = &apiRPCClient{ - client: rpc.NewClient(connection), + client: rpc.NewClient(connection), + muxBroker: s.muxBroker, } if mmplugin, ok := s.impl.(interface { @@ -286,8 +294,8 @@ func (g *hooksRPCClient) ServeHTTP(c *Context, w http.ResponseWriter, r *http.Re defer connection.Close() rpcServer := rpc.NewServer() - if err := rpcServer.RegisterName("Plugin", &httpResponseWriterRPCServer{w: w}); err != nil { - g.log.Error("Plugin failed to ServeHTTP, coulden't register RPC name", mlog.Err(err)) + if err := rpcServer.RegisterName("Plugin", &httpResponseWriterRPCServer{w: w, log: g.log}); err != nil { + g.log.Error("Plugin failed to ServeHTTP, couldn't register RPC name", mlog.Err(err)) return } rpcServer.ServeConn(connection) @@ -363,6 +371,76 @@ func (s *hooksRPCServer) ServeHTTP(args *Z_ServeHTTPArgs, returns *struct{}) err return nil } +type Z_PluginHTTPArgs struct { + Request *http.Request + RequestBody []byte +} + +type Z_PluginHTTPReturns struct { + Response *http.Response + ResponseBody []byte +} + +func (g *apiRPCClient) PluginHTTP(request *http.Request) *http.Response { + forwardedRequest := &http.Request{ + Method: request.Method, + 
URL: request.URL, + Proto: request.Proto, + ProtoMajor: request.ProtoMajor, + ProtoMinor: request.ProtoMinor, + Header: request.Header, + Host: request.Host, + RemoteAddr: request.RemoteAddr, + RequestURI: request.RequestURI, + } + + requestBody, err := ioutil.ReadAll(request.Body) + if err != nil { + log.Printf("RPC call to PluginHTTP API failed: %s", err.Error()) + return nil + } + request.Body.Close() + request.Body = nil + + _args := &Z_PluginHTTPArgs{ + Request: forwardedRequest, + RequestBody: requestBody, + } + + _returns := &Z_PluginHTTPReturns{} + if err := g.client.Call("Plugin.PluginHTTP", _args, _returns); err != nil { + log.Printf("RPC call to PluginHTTP API failed: %s", err.Error()) + return nil + } + + _returns.Response.Body = ioutil.NopCloser(bytes.NewBuffer(_returns.ResponseBody)) + + return _returns.Response +} + +func (s *apiRPCServer) PluginHTTP(args *Z_PluginHTTPArgs, returns *Z_PluginHTTPReturns) error { + args.Request.Body = ioutil.NopCloser(bytes.NewBuffer(args.RequestBody)) + + if hook, ok := s.impl.(interface { + PluginHTTP(request *http.Request) *http.Response + }); ok { + response := hook.PluginHTTP(args.Request) + + responseBody, err := ioutil.ReadAll(response.Body) + if err != nil { + return encodableError(fmt.Errorf("RPC call to PluginHTTP API failed: %s", err.Error())) + } + response.Body.Close() + response.Body = nil + + returns.Response = response + returns.ResponseBody = responseBody + } else { + return encodableError(fmt.Errorf("API PluginHTTP called but not implemented.")) + } + return nil +} + func init() { hookNameToId["FileWillBeUploaded"] = FileWillBeUploadedId } @@ -395,26 +473,31 @@ func (g *hooksRPCClient) FileWillBeUploaded(c *Context, info *model.FileInfo, fi serveIOReader(file, uploadedFileConnection) }() + replacementDone := make(chan bool) replacementFileStreamId := g.muxBroker.NextId() go func() { + defer close(replacementDone) + replacementFileConnection, err := g.muxBroker.Accept(replacementFileStreamId) if err != nil { g.log.Error("Plugin failed to serve replacement file stream. MuxBroker could not Accept connection", mlog.Err(err)) return } defer replacementFileConnection.Close() - if _, err := io.Copy(output, replacementFileConnection); err != nil && err != io.EOF { + if _, err := io.Copy(output, replacementFileConnection); err != nil { g.log.Error("Error reading replacement file.", mlog.Err(err)) } }() _args := &Z_FileWillBeUploadedArgs{c, info, uploadedFileStreamId, replacementFileStreamId} _returns := &Z_FileWillBeUploadedReturns{A: _args.B} - if g.implemented[FileWillBeUploadedId] { - if err := g.client.Call("Plugin.FileWillBeUploaded", _args, _returns); err != nil { - g.log.Error("RPC call FileWillBeUploaded to plugin failed.", mlog.Err(err)) - } + if err := g.client.Call("Plugin.FileWillBeUploaded", _args, _returns); err != nil { + g.log.Error("RPC call FileWillBeUploaded to plugin failed.", mlog.Err(err)) } + + // Ensure the io.Copy from the replacementFileConnection above completes. + <-replacementDone + return _returns.A, _returns.B } @@ -446,7 +529,7 @@ func (s *hooksRPCServer) FileWillBeUploaded(args *Z_FileWillBeUploadedArgs, retu return nil } -// MessageWillBePosted is in this file because of the difficulty of identifiying which fields need special behaviour. +// MessageWillBePosted is in this file because of the difficulty of identifying which fields need special behaviour. // The special behaviour needed is decoding the returned post into the original one to avoid the unintentional removal // of fields by older plugins. 
func init() { @@ -486,8 +569,8 @@ func (s *hooksRPCServer) MessageWillBePosted(args *Z_MessageWillBePostedArgs, re return nil } -// MessageWillBeUpdated is in this file because of the difficulty of identifiying which fields need special behaviour. -// The special behavour needed is decoding the returned post into the original one to avoid the unintentional removal +// MessageWillBeUpdated is in this file because of the difficulty of identifying which fields need special behaviour. +// The special behaviour needed is decoding the returned post into the original one to avoid the unintentional removal // of fields by older plugins. func init() { hookNameToId["MessageWillBeUpdated"] = MessageWillBeUpdatedId @@ -526,3 +609,170 @@ func (s *hooksRPCServer) MessageWillBeUpdated(args *Z_MessageWillBeUpdatedArgs, } return nil } + +type Z_LogDebugArgs struct { + A string + B []interface{} +} + +type Z_LogDebugReturns struct { +} + +func (g *apiRPCClient) LogDebug(msg string, keyValuePairs ...interface{}) { + stringifiedPairs := stringifyToObjects(keyValuePairs) + _args := &Z_LogDebugArgs{msg, stringifiedPairs} + _returns := &Z_LogDebugReturns{} + if err := g.client.Call("Plugin.LogDebug", _args, _returns); err != nil { + log.Printf("RPC call to LogDebug API failed: %s", err.Error()) + } + +} + +func (s *apiRPCServer) LogDebug(args *Z_LogDebugArgs, returns *Z_LogDebugReturns) error { + if hook, ok := s.impl.(interface { + LogDebug(msg string, keyValuePairs ...interface{}) + }); ok { + hook.LogDebug(args.A, args.B...) + } else { + return encodableError(fmt.Errorf("API LogDebug called but not implemented.")) + } + return nil +} + +type Z_LogInfoArgs struct { + A string + B []interface{} +} + +type Z_LogInfoReturns struct { +} + +func (g *apiRPCClient) LogInfo(msg string, keyValuePairs ...interface{}) { + stringifiedPairs := stringifyToObjects(keyValuePairs) + _args := &Z_LogInfoArgs{msg, stringifiedPairs} + _returns := &Z_LogInfoReturns{} + if err := g.client.Call("Plugin.LogInfo", _args, _returns); err != nil { + log.Printf("RPC call to LogInfo API failed: %s", err.Error()) + } + +} + +func (s *apiRPCServer) LogInfo(args *Z_LogInfoArgs, returns *Z_LogInfoReturns) error { + if hook, ok := s.impl.(interface { + LogInfo(msg string, keyValuePairs ...interface{}) + }); ok { + hook.LogInfo(args.A, args.B...) + } else { + return encodableError(fmt.Errorf("API LogInfo called but not implemented.")) + } + return nil +} + +type Z_LogWarnArgs struct { + A string + B []interface{} +} + +type Z_LogWarnReturns struct { +} + +func (g *apiRPCClient) LogWarn(msg string, keyValuePairs ...interface{}) { + stringifiedPairs := stringifyToObjects(keyValuePairs) + _args := &Z_LogWarnArgs{msg, stringifiedPairs} + _returns := &Z_LogWarnReturns{} + if err := g.client.Call("Plugin.LogWarn", _args, _returns); err != nil { + log.Printf("RPC call to LogWarn API failed: %s", err.Error()) + } + +} + +func (s *apiRPCServer) LogWarn(args *Z_LogWarnArgs, returns *Z_LogWarnReturns) error { + if hook, ok := s.impl.(interface { + LogWarn(msg string, keyValuePairs ...interface{}) + }); ok { + hook.LogWarn(args.A, args.B...) 
+ } else { + return encodableError(fmt.Errorf("API LogWarn called but not implemented.")) + } + return nil +} + +type Z_LogErrorArgs struct { + A string + B []interface{} +} + +type Z_LogErrorReturns struct { +} + +func (g *apiRPCClient) LogError(msg string, keyValuePairs ...interface{}) { + stringifiedPairs := stringifyToObjects(keyValuePairs) + _args := &Z_LogErrorArgs{msg, stringifiedPairs} + _returns := &Z_LogErrorReturns{} + if err := g.client.Call("Plugin.LogError", _args, _returns); err != nil { + log.Printf("RPC call to LogError API failed: %s", err.Error()) + } +} + +func (s *apiRPCServer) LogError(args *Z_LogErrorArgs, returns *Z_LogErrorReturns) error { + if hook, ok := s.impl.(interface { + LogError(msg string, keyValuePairs ...interface{}) + }); ok { + hook.LogError(args.A, args.B...) + } else { + return encodableError(fmt.Errorf("API LogError called but not implemented.")) + } + return nil +} + +type Z_InstallPluginArgs struct { + PluginStreamID uint32 + B bool +} + +type Z_InstallPluginReturns struct { + A *model.Manifest + B *model.AppError +} + +func (g *apiRPCClient) InstallPlugin(file io.Reader, replace bool) (*model.Manifest, *model.AppError) { + pluginStreamID := g.muxBroker.NextId() + + go func() { + uploadPluginConnection, err := g.muxBroker.Accept(pluginStreamID) + if err != nil { + log.Print("Plugin failed to upload plugin. MuxBroker could not Accept connection", mlog.Err(err)) + return + } + defer uploadPluginConnection.Close() + serveIOReader(file, uploadPluginConnection) + }() + + _args := &Z_InstallPluginArgs{pluginStreamID, replace} + _returns := &Z_InstallPluginReturns{} + if err := g.client.Call("Plugin.InstallPlugin", _args, _returns); err != nil { + log.Print("RPC call InstallPlugin to plugin failed.", mlog.Err(err)) + } + + return _returns.A, _returns.B +} + +func (s *apiRPCServer) InstallPlugin(args *Z_InstallPluginArgs, returns *Z_InstallPluginReturns) error { + hook, ok := s.impl.(interface { + InstallPlugin(file io.Reader, replace bool) (*model.Manifest, *model.AppError) + }) + if !ok { + return encodableError(fmt.Errorf("API InstallPlugin called but not implemented.")) + } + + receivePluginConnection, err := s.muxBroker.Dial(args.PluginStreamID) + if err != nil { + fmt.Fprintf(os.Stderr, "[ERROR] Can't connect to remote plugin stream, error: %v", err.Error()) + return err + } + pluginReader := connectIOReader(receivePluginConnection) + defer pluginReader.Close() + + returns.A, returns.B = hook.InstallPlugin(pluginReader, args.B) + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/client_rpc_generated.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/client_rpc_generated.go similarity index 83% rename from vendor/github.com/mattermost/mattermost-server/plugin/client_rpc_generated.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/client_rpc_generated.go index 3634e72..a57d4a3 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/client_rpc_generated.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/client_rpc_generated.go @@ -10,8 +10,8 @@ import ( "fmt" "log" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" ) func init() { @@ -112,7 +112,6 @@ func (s *hooksRPCServer) ExecuteCommand(args *Z_ExecuteCommandArgs, returns *Z_E ExecuteCommand(c *Context, args *model.CommandArgs) (*model.CommandResponse, 
*model.AppError) }); ok { returns.A, returns.B = hook.ExecuteCommand(args.A, args.B) - } else { return encodableError(fmt.Errorf("Hook ExecuteCommand called but not implemented.")) } @@ -147,7 +146,6 @@ func (s *hooksRPCServer) UserHasBeenCreated(args *Z_UserHasBeenCreatedArgs, retu UserHasBeenCreated(c *Context, user *model.User) }); ok { hook.UserHasBeenCreated(args.A, args.B) - } else { return encodableError(fmt.Errorf("Hook UserHasBeenCreated called but not implemented.")) } @@ -183,7 +181,6 @@ func (s *hooksRPCServer) UserWillLogIn(args *Z_UserWillLogInArgs, returns *Z_Use UserWillLogIn(c *Context, user *model.User) string }); ok { returns.A = hook.UserWillLogIn(args.A, args.B) - } else { return encodableError(fmt.Errorf("Hook UserWillLogIn called but not implemented.")) } @@ -218,7 +215,6 @@ func (s *hooksRPCServer) UserHasLoggedIn(args *Z_UserHasLoggedInArgs, returns *Z UserHasLoggedIn(c *Context, user *model.User) }); ok { hook.UserHasLoggedIn(args.A, args.B) - } else { return encodableError(fmt.Errorf("Hook UserHasLoggedIn called but not implemented.")) } @@ -253,7 +249,6 @@ func (s *hooksRPCServer) MessageHasBeenPosted(args *Z_MessageHasBeenPostedArgs, MessageHasBeenPosted(c *Context, post *model.Post) }); ok { hook.MessageHasBeenPosted(args.A, args.B) - } else { return encodableError(fmt.Errorf("Hook MessageHasBeenPosted called but not implemented.")) } @@ -289,7 +284,6 @@ func (s *hooksRPCServer) MessageHasBeenUpdated(args *Z_MessageHasBeenUpdatedArgs MessageHasBeenUpdated(c *Context, newPost, oldPost *model.Post) }); ok { hook.MessageHasBeenUpdated(args.A, args.B, args.C) - } else { return encodableError(fmt.Errorf("Hook MessageHasBeenUpdated called but not implemented.")) } @@ -324,7 +318,6 @@ func (s *hooksRPCServer) ChannelHasBeenCreated(args *Z_ChannelHasBeenCreatedArgs ChannelHasBeenCreated(c *Context, channel *model.Channel) }); ok { hook.ChannelHasBeenCreated(args.A, args.B) - } else { return encodableError(fmt.Errorf("Hook ChannelHasBeenCreated called but not implemented.")) } @@ -360,7 +353,6 @@ func (s *hooksRPCServer) UserHasJoinedChannel(args *Z_UserHasJoinedChannelArgs, UserHasJoinedChannel(c *Context, channelMember *model.ChannelMember, actor *model.User) }); ok { hook.UserHasJoinedChannel(args.A, args.B, args.C) - } else { return encodableError(fmt.Errorf("Hook UserHasJoinedChannel called but not implemented.")) } @@ -396,7 +388,6 @@ func (s *hooksRPCServer) UserHasLeftChannel(args *Z_UserHasLeftChannelArgs, retu UserHasLeftChannel(c *Context, channelMember *model.ChannelMember, actor *model.User) }); ok { hook.UserHasLeftChannel(args.A, args.B, args.C) - } else { return encodableError(fmt.Errorf("Hook UserHasLeftChannel called but not implemented.")) } @@ -432,7 +423,6 @@ func (s *hooksRPCServer) UserHasJoinedTeam(args *Z_UserHasJoinedTeamArgs, return UserHasJoinedTeam(c *Context, teamMember *model.TeamMember, actor *model.User) }); ok { hook.UserHasJoinedTeam(args.A, args.B, args.C) - } else { return encodableError(fmt.Errorf("Hook UserHasJoinedTeam called but not implemented.")) } @@ -468,13 +458,80 @@ func (s *hooksRPCServer) UserHasLeftTeam(args *Z_UserHasLeftTeamArgs, returns *Z UserHasLeftTeam(c *Context, teamMember *model.TeamMember, actor *model.User) }); ok { hook.UserHasLeftTeam(args.A, args.B, args.C) - } else { return encodableError(fmt.Errorf("Hook UserHasLeftTeam called but not implemented.")) } return nil } +func init() { + hookNameToId["ReactionHasBeenAdded"] = ReactionHasBeenAddedId +} + +type Z_ReactionHasBeenAddedArgs struct { + A *Context + 
B *model.Reaction +} + +type Z_ReactionHasBeenAddedReturns struct { +} + +func (g *hooksRPCClient) ReactionHasBeenAdded(c *Context, reaction *model.Reaction) { + _args := &Z_ReactionHasBeenAddedArgs{c, reaction} + _returns := &Z_ReactionHasBeenAddedReturns{} + if g.implemented[ReactionHasBeenAddedId] { + if err := g.client.Call("Plugin.ReactionHasBeenAdded", _args, _returns); err != nil { + g.log.Error("RPC call ReactionHasBeenAdded to plugin failed.", mlog.Err(err)) + } + } + +} + +func (s *hooksRPCServer) ReactionHasBeenAdded(args *Z_ReactionHasBeenAddedArgs, returns *Z_ReactionHasBeenAddedReturns) error { + if hook, ok := s.impl.(interface { + ReactionHasBeenAdded(c *Context, reaction *model.Reaction) + }); ok { + hook.ReactionHasBeenAdded(args.A, args.B) + } else { + return encodableError(fmt.Errorf("Hook ReactionHasBeenAdded called but not implemented.")) + } + return nil +} + +func init() { + hookNameToId["ReactionHasBeenRemoved"] = ReactionHasBeenRemovedId +} + +type Z_ReactionHasBeenRemovedArgs struct { + A *Context + B *model.Reaction +} + +type Z_ReactionHasBeenRemovedReturns struct { +} + +func (g *hooksRPCClient) ReactionHasBeenRemoved(c *Context, reaction *model.Reaction) { + _args := &Z_ReactionHasBeenRemovedArgs{c, reaction} + _returns := &Z_ReactionHasBeenRemovedReturns{} + if g.implemented[ReactionHasBeenRemovedId] { + if err := g.client.Call("Plugin.ReactionHasBeenRemoved", _args, _returns); err != nil { + g.log.Error("RPC call ReactionHasBeenRemoved to plugin failed.", mlog.Err(err)) + } + } + +} + +func (s *hooksRPCServer) ReactionHasBeenRemoved(args *Z_ReactionHasBeenRemovedArgs, returns *Z_ReactionHasBeenRemovedReturns) error { + if hook, ok := s.impl.(interface { + ReactionHasBeenRemoved(c *Context, reaction *model.Reaction) + }); ok { + hook.ReactionHasBeenRemoved(args.A, args.B) + } else { + return encodableError(fmt.Errorf("Hook ReactionHasBeenRemoved called but not implemented.")) + } + return nil +} + type Z_RegisterCommandArgs struct { A *model.Command } @@ -497,6 +554,7 @@ func (s *apiRPCServer) RegisterCommand(args *Z_RegisterCommandArgs, returns *Z_R RegisterCommand(command *model.Command) error }); ok { returns.A = hook.RegisterCommand(args.A) + returns.A = encodableError(returns.A) } else { return encodableError(fmt.Errorf("API RegisterCommand called but not implemented.")) } @@ -526,12 +584,43 @@ func (s *apiRPCServer) UnregisterCommand(args *Z_UnregisterCommandArgs, returns UnregisterCommand(teamId, trigger string) error }); ok { returns.A = hook.UnregisterCommand(args.A, args.B) + returns.A = encodableError(returns.A) } else { return encodableError(fmt.Errorf("API UnregisterCommand called but not implemented.")) } return nil } +type Z_ExecuteSlashCommandArgs struct { + A *model.CommandArgs +} + +type Z_ExecuteSlashCommandReturns struct { + A *model.CommandResponse + B error +} + +func (g *apiRPCClient) ExecuteSlashCommand(commandArgs *model.CommandArgs) (*model.CommandResponse, error) { + _args := &Z_ExecuteSlashCommandArgs{commandArgs} + _returns := &Z_ExecuteSlashCommandReturns{} + if err := g.client.Call("Plugin.ExecuteSlashCommand", _args, _returns); err != nil { + log.Printf("RPC call to ExecuteSlashCommand API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) ExecuteSlashCommand(args *Z_ExecuteSlashCommandArgs, returns *Z_ExecuteSlashCommandReturns) error { + if hook, ok := s.impl.(interface { + ExecuteSlashCommand(commandArgs *model.CommandArgs) (*model.CommandResponse, error) + }); ok { + returns.A, 
returns.B = hook.ExecuteSlashCommand(args.A) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API ExecuteSlashCommand called but not implemented.")) + } + return nil +} + type Z_GetSessionArgs struct { A string } @@ -588,6 +677,33 @@ func (s *apiRPCServer) GetConfig(args *Z_GetConfigArgs, returns *Z_GetConfigRetu return nil } +type Z_GetUnsanitizedConfigArgs struct { +} + +type Z_GetUnsanitizedConfigReturns struct { + A *model.Config +} + +func (g *apiRPCClient) GetUnsanitizedConfig() *model.Config { + _args := &Z_GetUnsanitizedConfigArgs{} + _returns := &Z_GetUnsanitizedConfigReturns{} + if err := g.client.Call("Plugin.GetUnsanitizedConfig", _args, _returns); err != nil { + log.Printf("RPC call to GetUnsanitizedConfig API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) GetUnsanitizedConfig(args *Z_GetUnsanitizedConfigArgs, returns *Z_GetUnsanitizedConfigReturns) error { + if hook, ok := s.impl.(interface { + GetUnsanitizedConfig() *model.Config + }); ok { + returns.A = hook.GetUnsanitizedConfig() + } else { + return encodableError(fmt.Errorf("API GetUnsanitizedConfig called but not implemented.")) + } + return nil +} + type Z_SaveConfigArgs struct { A *model.Config } @@ -693,6 +809,7 @@ func (s *apiRPCServer) GetBundlePath(args *Z_GetBundlePathArgs, returns *Z_GetBu GetBundlePath() (string, error) }); ok { returns.A, returns.B = hook.GetBundlePath() + returns.B = encodableError(returns.B) } else { return encodableError(fmt.Errorf("API GetBundlePath called but not implemented.")) } @@ -808,6 +925,33 @@ func (s *apiRPCServer) GetDiagnosticId(args *Z_GetDiagnosticIdArgs, returns *Z_G return nil } +type Z_GetTelemetryIdArgs struct { +} + +type Z_GetTelemetryIdReturns struct { + A string +} + +func (g *apiRPCClient) GetTelemetryId() string { + _args := &Z_GetTelemetryIdArgs{} + _returns := &Z_GetTelemetryIdReturns{} + if err := g.client.Call("Plugin.GetTelemetryId", _args, _returns); err != nil { + log.Printf("RPC call to GetTelemetryId API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) GetTelemetryId(args *Z_GetTelemetryIdArgs, returns *Z_GetTelemetryIdReturns) error { + if hook, ok := s.impl.(interface { + GetTelemetryId() string + }); ok { + returns.A = hook.GetTelemetryId() + } else { + return encodableError(fmt.Errorf("API GetTelemetryId called but not implemented.")) + } + return nil +} + type Z_CreateUserArgs struct { A *model.User } @@ -1041,6 +1185,93 @@ func (s *apiRPCServer) GetUsersInTeam(args *Z_GetUsersInTeamArgs, returns *Z_Get return nil } +type Z_GetPreferencesForUserArgs struct { + A string +} + +type Z_GetPreferencesForUserReturns struct { + A []model.Preference + B *model.AppError +} + +func (g *apiRPCClient) GetPreferencesForUser(userId string) ([]model.Preference, *model.AppError) { + _args := &Z_GetPreferencesForUserArgs{userId} + _returns := &Z_GetPreferencesForUserReturns{} + if err := g.client.Call("Plugin.GetPreferencesForUser", _args, _returns); err != nil { + log.Printf("RPC call to GetPreferencesForUser API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) GetPreferencesForUser(args *Z_GetPreferencesForUserArgs, returns *Z_GetPreferencesForUserReturns) error { + if hook, ok := s.impl.(interface { + GetPreferencesForUser(userId string) ([]model.Preference, *model.AppError) + }); ok { + returns.A, returns.B = hook.GetPreferencesForUser(args.A) + } else { + return encodableError(fmt.Errorf("API GetPreferencesForUser called but 
not implemented.")) + } + return nil +} + +type Z_UpdatePreferencesForUserArgs struct { + A string + B []model.Preference +} + +type Z_UpdatePreferencesForUserReturns struct { + A *model.AppError +} + +func (g *apiRPCClient) UpdatePreferencesForUser(userId string, preferences []model.Preference) *model.AppError { + _args := &Z_UpdatePreferencesForUserArgs{userId, preferences} + _returns := &Z_UpdatePreferencesForUserReturns{} + if err := g.client.Call("Plugin.UpdatePreferencesForUser", _args, _returns); err != nil { + log.Printf("RPC call to UpdatePreferencesForUser API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) UpdatePreferencesForUser(args *Z_UpdatePreferencesForUserArgs, returns *Z_UpdatePreferencesForUserReturns) error { + if hook, ok := s.impl.(interface { + UpdatePreferencesForUser(userId string, preferences []model.Preference) *model.AppError + }); ok { + returns.A = hook.UpdatePreferencesForUser(args.A, args.B) + } else { + return encodableError(fmt.Errorf("API UpdatePreferencesForUser called but not implemented.")) + } + return nil +} + +type Z_DeletePreferencesForUserArgs struct { + A string + B []model.Preference +} + +type Z_DeletePreferencesForUserReturns struct { + A *model.AppError +} + +func (g *apiRPCClient) DeletePreferencesForUser(userId string, preferences []model.Preference) *model.AppError { + _args := &Z_DeletePreferencesForUserArgs{userId, preferences} + _returns := &Z_DeletePreferencesForUserReturns{} + if err := g.client.Call("Plugin.DeletePreferencesForUser", _args, _returns); err != nil { + log.Printf("RPC call to DeletePreferencesForUser API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) DeletePreferencesForUser(args *Z_DeletePreferencesForUserArgs, returns *Z_DeletePreferencesForUserReturns) error { + if hook, ok := s.impl.(interface { + DeletePreferencesForUser(userId string, preferences []model.Preference) *model.AppError + }); ok { + returns.A = hook.DeletePreferencesForUser(args.A, args.B) + } else { + return encodableError(fmt.Errorf("API DeletePreferencesForUser called but not implemented.")) + } + return nil +} + type Z_GetTeamIconArgs struct { A string } @@ -1655,6 +1886,37 @@ func (s *apiRPCServer) CreateTeamMembers(args *Z_CreateTeamMembersArgs, returns return nil } +type Z_CreateTeamMembersGracefullyArgs struct { + A string + B []string + C string +} + +type Z_CreateTeamMembersGracefullyReturns struct { + A []*model.TeamMemberWithError + B *model.AppError +} + +func (g *apiRPCClient) CreateTeamMembersGracefully(teamId string, userIds []string, requestorId string) ([]*model.TeamMemberWithError, *model.AppError) { + _args := &Z_CreateTeamMembersGracefullyArgs{teamId, userIds, requestorId} + _returns := &Z_CreateTeamMembersGracefullyReturns{} + if err := g.client.Call("Plugin.CreateTeamMembersGracefully", _args, _returns); err != nil { + log.Printf("RPC call to CreateTeamMembersGracefully API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) CreateTeamMembersGracefully(args *Z_CreateTeamMembersGracefullyArgs, returns *Z_CreateTeamMembersGracefullyReturns) error { + if hook, ok := s.impl.(interface { + CreateTeamMembersGracefully(teamId string, userIds []string, requestorId string) ([]*model.TeamMemberWithError, *model.AppError) + }); ok { + returns.A, returns.B = hook.CreateTeamMembersGracefully(args.A, args.B, args.C) + } else { + return encodableError(fmt.Errorf("API CreateTeamMembersGracefully called but not implemented.")) + } + return nil 
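// Editor's note, not part of the vendored patch: a minimal plugin-side sketch of
// how the preferences API wired up above might be used through the plugin API.
// The category and preference name below are hypothetical; the signatures match
// the GetPreferencesForUser/UpdatePreferencesForUser glue generated above.
func savePluginPreference(api API, userID, value string) *model.AppError {
	pref := model.Preference{
		UserId:   userID,
		Category: "my_plugin_settings", // hypothetical per-plugin category
		Name:     "notifications",
		Value:    value,
	}
	return api.UpdatePreferencesForUser(userID, []model.Preference{pref})
}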
+} + type Z_DeleteTeamMemberArgs struct { A string B string @@ -2224,6 +2486,37 @@ func (s *apiRPCServer) SearchPostsInTeam(args *Z_SearchPostsInTeamArgs, returns return nil } +type Z_SearchPostsInTeamForUserArgs struct { + A string + B string + C model.SearchParameter +} + +type Z_SearchPostsInTeamForUserReturns struct { + A *model.PostSearchResults + B *model.AppError +} + +func (g *apiRPCClient) SearchPostsInTeamForUser(teamId string, userId string, searchParams model.SearchParameter) (*model.PostSearchResults, *model.AppError) { + _args := &Z_SearchPostsInTeamForUserArgs{teamId, userId, searchParams} + _returns := &Z_SearchPostsInTeamForUserReturns{} + if err := g.client.Call("Plugin.SearchPostsInTeamForUser", _args, _returns); err != nil { + log.Printf("RPC call to SearchPostsInTeamForUser API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) SearchPostsInTeamForUser(args *Z_SearchPostsInTeamForUserArgs, returns *Z_SearchPostsInTeamForUserReturns) error { + if hook, ok := s.impl.(interface { + SearchPostsInTeamForUser(teamId string, userId string, searchParams model.SearchParameter) (*model.PostSearchResults, *model.AppError) + }); ok { + returns.A, returns.B = hook.SearchPostsInTeamForUser(args.A, args.B, args.C) + } else { + return encodableError(fmt.Errorf("API SearchPostsInTeamForUser called but not implemented.")) + } + return nil +} + type Z_AddChannelMemberArgs struct { A string B string @@ -2254,6 +2547,37 @@ func (s *apiRPCServer) AddChannelMember(args *Z_AddChannelMemberArgs, returns *Z return nil } +type Z_AddUserToChannelArgs struct { + A string + B string + C string +} + +type Z_AddUserToChannelReturns struct { + A *model.ChannelMember + B *model.AppError +} + +func (g *apiRPCClient) AddUserToChannel(channelId, userId, asUserId string) (*model.ChannelMember, *model.AppError) { + _args := &Z_AddUserToChannelArgs{channelId, userId, asUserId} + _returns := &Z_AddUserToChannelReturns{} + if err := g.client.Call("Plugin.AddUserToChannel", _args, _returns); err != nil { + log.Printf("RPC call to AddUserToChannel API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) AddUserToChannel(args *Z_AddUserToChannelArgs, returns *Z_AddUserToChannelReturns) error { + if hook, ok := s.impl.(interface { + AddUserToChannel(channelId, userId, asUserId string) (*model.ChannelMember, *model.AppError) + }); ok { + returns.A, returns.B = hook.AddUserToChannel(args.A, args.B, args.C) + } else { + return encodableError(fmt.Errorf("API AddUserToChannel called but not implemented.")) + } + return nil +} + type Z_GetChannelMemberArgs struct { A string B string @@ -2439,95 +2763,182 @@ func (s *apiRPCServer) UpdateChannelMemberNotifications(args *Z_UpdateChannelMem return nil } -type Z_DeleteChannelMemberArgs struct { +type Z_GetGroupArgs struct { A string - B string } -type Z_DeleteChannelMemberReturns struct { - A *model.AppError +type Z_GetGroupReturns struct { + A *model.Group + B *model.AppError } -func (g *apiRPCClient) DeleteChannelMember(channelId, userId string) *model.AppError { - _args := &Z_DeleteChannelMemberArgs{channelId, userId} - _returns := &Z_DeleteChannelMemberReturns{} - if err := g.client.Call("Plugin.DeleteChannelMember", _args, _returns); err != nil { - log.Printf("RPC call to DeleteChannelMember API failed: %s", err.Error()) +func (g *apiRPCClient) GetGroup(groupId string) (*model.Group, *model.AppError) { + _args := &Z_GetGroupArgs{groupId} + _returns := &Z_GetGroupReturns{} + if err := 
g.client.Call("Plugin.GetGroup", _args, _returns); err != nil { + log.Printf("RPC call to GetGroup API failed: %s", err.Error()) } - return _returns.A + return _returns.A, _returns.B } -func (s *apiRPCServer) DeleteChannelMember(args *Z_DeleteChannelMemberArgs, returns *Z_DeleteChannelMemberReturns) error { +func (s *apiRPCServer) GetGroup(args *Z_GetGroupArgs, returns *Z_GetGroupReturns) error { if hook, ok := s.impl.(interface { - DeleteChannelMember(channelId, userId string) *model.AppError + GetGroup(groupId string) (*model.Group, *model.AppError) }); ok { - returns.A = hook.DeleteChannelMember(args.A, args.B) + returns.A, returns.B = hook.GetGroup(args.A) } else { - return encodableError(fmt.Errorf("API DeleteChannelMember called but not implemented.")) + return encodableError(fmt.Errorf("API GetGroup called but not implemented.")) } return nil } -type Z_CreatePostArgs struct { - A *model.Post +type Z_GetGroupByNameArgs struct { + A string } -type Z_CreatePostReturns struct { - A *model.Post +type Z_GetGroupByNameReturns struct { + A *model.Group B *model.AppError } -func (g *apiRPCClient) CreatePost(post *model.Post) (*model.Post, *model.AppError) { - _args := &Z_CreatePostArgs{post} - _returns := &Z_CreatePostReturns{} - if err := g.client.Call("Plugin.CreatePost", _args, _returns); err != nil { - log.Printf("RPC call to CreatePost API failed: %s", err.Error()) +func (g *apiRPCClient) GetGroupByName(name string) (*model.Group, *model.AppError) { + _args := &Z_GetGroupByNameArgs{name} + _returns := &Z_GetGroupByNameReturns{} + if err := g.client.Call("Plugin.GetGroupByName", _args, _returns); err != nil { + log.Printf("RPC call to GetGroupByName API failed: %s", err.Error()) } return _returns.A, _returns.B } -func (s *apiRPCServer) CreatePost(args *Z_CreatePostArgs, returns *Z_CreatePostReturns) error { +func (s *apiRPCServer) GetGroupByName(args *Z_GetGroupByNameArgs, returns *Z_GetGroupByNameReturns) error { if hook, ok := s.impl.(interface { - CreatePost(post *model.Post) (*model.Post, *model.AppError) + GetGroupByName(name string) (*model.Group, *model.AppError) }); ok { - returns.A, returns.B = hook.CreatePost(args.A) + returns.A, returns.B = hook.GetGroupByName(args.A) } else { - return encodableError(fmt.Errorf("API CreatePost called but not implemented.")) + return encodableError(fmt.Errorf("API GetGroupByName called but not implemented.")) } return nil } -type Z_AddReactionArgs struct { - A *model.Reaction +type Z_GetGroupsForUserArgs struct { + A string } -type Z_AddReactionReturns struct { - A *model.Reaction +type Z_GetGroupsForUserReturns struct { + A []*model.Group B *model.AppError } -func (g *apiRPCClient) AddReaction(reaction *model.Reaction) (*model.Reaction, *model.AppError) { - _args := &Z_AddReactionArgs{reaction} - _returns := &Z_AddReactionReturns{} - if err := g.client.Call("Plugin.AddReaction", _args, _returns); err != nil { - log.Printf("RPC call to AddReaction API failed: %s", err.Error()) +func (g *apiRPCClient) GetGroupsForUser(userId string) ([]*model.Group, *model.AppError) { + _args := &Z_GetGroupsForUserArgs{userId} + _returns := &Z_GetGroupsForUserReturns{} + if err := g.client.Call("Plugin.GetGroupsForUser", _args, _returns); err != nil { + log.Printf("RPC call to GetGroupsForUser API failed: %s", err.Error()) } return _returns.A, _returns.B } -func (s *apiRPCServer) AddReaction(args *Z_AddReactionArgs, returns *Z_AddReactionReturns) error { +func (s *apiRPCServer) GetGroupsForUser(args *Z_GetGroupsForUserArgs, returns *Z_GetGroupsForUserReturns) 
error { if hook, ok := s.impl.(interface { - AddReaction(reaction *model.Reaction) (*model.Reaction, *model.AppError) + GetGroupsForUser(userId string) ([]*model.Group, *model.AppError) }); ok { - returns.A, returns.B = hook.AddReaction(args.A) + returns.A, returns.B = hook.GetGroupsForUser(args.A) } else { - return encodableError(fmt.Errorf("API AddReaction called but not implemented.")) + return encodableError(fmt.Errorf("API GetGroupsForUser called but not implemented.")) } return nil } -type Z_RemoveReactionArgs struct { - A *model.Reaction +type Z_DeleteChannelMemberArgs struct { + A string + B string +} + +type Z_DeleteChannelMemberReturns struct { + A *model.AppError +} + +func (g *apiRPCClient) DeleteChannelMember(channelId, userId string) *model.AppError { + _args := &Z_DeleteChannelMemberArgs{channelId, userId} + _returns := &Z_DeleteChannelMemberReturns{} + if err := g.client.Call("Plugin.DeleteChannelMember", _args, _returns); err != nil { + log.Printf("RPC call to DeleteChannelMember API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) DeleteChannelMember(args *Z_DeleteChannelMemberArgs, returns *Z_DeleteChannelMemberReturns) error { + if hook, ok := s.impl.(interface { + DeleteChannelMember(channelId, userId string) *model.AppError + }); ok { + returns.A = hook.DeleteChannelMember(args.A, args.B) + } else { + return encodableError(fmt.Errorf("API DeleteChannelMember called but not implemented.")) + } + return nil +} + +type Z_CreatePostArgs struct { + A *model.Post +} + +type Z_CreatePostReturns struct { + A *model.Post + B *model.AppError +} + +func (g *apiRPCClient) CreatePost(post *model.Post) (*model.Post, *model.AppError) { + _args := &Z_CreatePostArgs{post} + _returns := &Z_CreatePostReturns{} + if err := g.client.Call("Plugin.CreatePost", _args, _returns); err != nil { + log.Printf("RPC call to CreatePost API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) CreatePost(args *Z_CreatePostArgs, returns *Z_CreatePostReturns) error { + if hook, ok := s.impl.(interface { + CreatePost(post *model.Post) (*model.Post, *model.AppError) + }); ok { + returns.A, returns.B = hook.CreatePost(args.A) + } else { + return encodableError(fmt.Errorf("API CreatePost called but not implemented.")) + } + return nil +} + +type Z_AddReactionArgs struct { + A *model.Reaction +} + +type Z_AddReactionReturns struct { + A *model.Reaction + B *model.AppError +} + +func (g *apiRPCClient) AddReaction(reaction *model.Reaction) (*model.Reaction, *model.AppError) { + _args := &Z_AddReactionArgs{reaction} + _returns := &Z_AddReactionReturns{} + if err := g.client.Call("Plugin.AddReaction", _args, _returns); err != nil { + log.Printf("RPC call to AddReaction API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) AddReaction(args *Z_AddReactionArgs, returns *Z_AddReactionReturns) error { + if hook, ok := s.impl.(interface { + AddReaction(reaction *model.Reaction) (*model.Reaction, *model.AppError) + }); ok { + returns.A, returns.B = hook.AddReaction(args.A) + } else { + return encodableError(fmt.Errorf("API AddReaction called but not implemented.")) + } + return nil +} + +type Z_RemoveReactionArgs struct { + A *model.Reaction } type Z_RemoveReactionReturns struct { @@ -3144,6 +3555,37 @@ func (s *apiRPCServer) GetFileInfo(args *Z_GetFileInfoArgs, returns *Z_GetFileIn return nil } +type Z_GetFileInfosArgs struct { + A int + B int + C *model.GetFileInfosOptions +} + +type Z_GetFileInfosReturns 
struct { + A []*model.FileInfo + B *model.AppError +} + +func (g *apiRPCClient) GetFileInfos(page, perPage int, opt *model.GetFileInfosOptions) ([]*model.FileInfo, *model.AppError) { + _args := &Z_GetFileInfosArgs{page, perPage, opt} + _returns := &Z_GetFileInfosReturns{} + if err := g.client.Call("Plugin.GetFileInfos", _args, _returns); err != nil { + log.Printf("RPC call to GetFileInfos API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) GetFileInfos(args *Z_GetFileInfosArgs, returns *Z_GetFileInfosReturns) error { + if hook, ok := s.impl.(interface { + GetFileInfos(page, perPage int, opt *model.GetFileInfosOptions) ([]*model.FileInfo, *model.AppError) + }); ok { + returns.A, returns.B = hook.GetFileInfos(args.A, args.B, args.C) + } else { + return encodableError(fmt.Errorf("API GetFileInfos called but not implemented.")) + } + return nil +} + type Z_GetFileArgs struct { A string } @@ -3521,6 +3963,67 @@ func (s *apiRPCServer) KVCompareAndSet(args *Z_KVCompareAndSetArgs, returns *Z_K return nil } +type Z_KVCompareAndDeleteArgs struct { + A string + B []byte +} + +type Z_KVCompareAndDeleteReturns struct { + A bool + B *model.AppError +} + +func (g *apiRPCClient) KVCompareAndDelete(key string, oldValue []byte) (bool, *model.AppError) { + _args := &Z_KVCompareAndDeleteArgs{key, oldValue} + _returns := &Z_KVCompareAndDeleteReturns{} + if err := g.client.Call("Plugin.KVCompareAndDelete", _args, _returns); err != nil { + log.Printf("RPC call to KVCompareAndDelete API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) KVCompareAndDelete(args *Z_KVCompareAndDeleteArgs, returns *Z_KVCompareAndDeleteReturns) error { + if hook, ok := s.impl.(interface { + KVCompareAndDelete(key string, oldValue []byte) (bool, *model.AppError) + }); ok { + returns.A, returns.B = hook.KVCompareAndDelete(args.A, args.B) + } else { + return encodableError(fmt.Errorf("API KVCompareAndDelete called but not implemented.")) + } + return nil +} + +type Z_KVSetWithOptionsArgs struct { + A string + B []byte + C model.PluginKVSetOptions +} + +type Z_KVSetWithOptionsReturns struct { + A bool + B *model.AppError +} + +func (g *apiRPCClient) KVSetWithOptions(key string, value []byte, options model.PluginKVSetOptions) (bool, *model.AppError) { + _args := &Z_KVSetWithOptionsArgs{key, value, options} + _returns := &Z_KVSetWithOptionsReturns{} + if err := g.client.Call("Plugin.KVSetWithOptions", _args, _returns); err != nil { + log.Printf("RPC call to KVSetWithOptions API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) KVSetWithOptions(args *Z_KVSetWithOptionsArgs, returns *Z_KVSetWithOptionsReturns) error { + if hook, ok := s.impl.(interface { + KVSetWithOptions(key string, value []byte, options model.PluginKVSetOptions) (bool, *model.AppError) + }); ok { + returns.A, returns.B = hook.KVSetWithOptions(args.A, args.B, args.C) + } else { + return encodableError(fmt.Errorf("API KVSetWithOptions called but not implemented.")) + } + return nil +} + type Z_KVSetWithExpiryArgs struct { A string B []byte @@ -3783,118 +4286,6 @@ func (s *apiRPCServer) HasPermissionToChannel(args *Z_HasPermissionToChannelArgs return nil } -type Z_LogDebugArgs struct { - A string - B []interface{} -} - -type Z_LogDebugReturns struct { -} - -func (g *apiRPCClient) LogDebug(msg string, keyValuePairs ...interface{}) { - _args := &Z_LogDebugArgs{msg, keyValuePairs} - _returns := &Z_LogDebugReturns{} - if err := 
g.client.Call("Plugin.LogDebug", _args, _returns); err != nil { - log.Printf("RPC call to LogDebug API failed: %s", err.Error()) - } - -} - -func (s *apiRPCServer) LogDebug(args *Z_LogDebugArgs, returns *Z_LogDebugReturns) error { - if hook, ok := s.impl.(interface { - LogDebug(msg string, keyValuePairs ...interface{}) - }); ok { - hook.LogDebug(args.A, args.B...) - } else { - return encodableError(fmt.Errorf("API LogDebug called but not implemented.")) - } - return nil -} - -type Z_LogInfoArgs struct { - A string - B []interface{} -} - -type Z_LogInfoReturns struct { -} - -func (g *apiRPCClient) LogInfo(msg string, keyValuePairs ...interface{}) { - _args := &Z_LogInfoArgs{msg, keyValuePairs} - _returns := &Z_LogInfoReturns{} - if err := g.client.Call("Plugin.LogInfo", _args, _returns); err != nil { - log.Printf("RPC call to LogInfo API failed: %s", err.Error()) - } - -} - -func (s *apiRPCServer) LogInfo(args *Z_LogInfoArgs, returns *Z_LogInfoReturns) error { - if hook, ok := s.impl.(interface { - LogInfo(msg string, keyValuePairs ...interface{}) - }); ok { - hook.LogInfo(args.A, args.B...) - } else { - return encodableError(fmt.Errorf("API LogInfo called but not implemented.")) - } - return nil -} - -type Z_LogErrorArgs struct { - A string - B []interface{} -} - -type Z_LogErrorReturns struct { -} - -func (g *apiRPCClient) LogError(msg string, keyValuePairs ...interface{}) { - _args := &Z_LogErrorArgs{msg, keyValuePairs} - _returns := &Z_LogErrorReturns{} - if err := g.client.Call("Plugin.LogError", _args, _returns); err != nil { - log.Printf("RPC call to LogError API failed: %s", err.Error()) - } - -} - -func (s *apiRPCServer) LogError(args *Z_LogErrorArgs, returns *Z_LogErrorReturns) error { - if hook, ok := s.impl.(interface { - LogError(msg string, keyValuePairs ...interface{}) - }); ok { - hook.LogError(args.A, args.B...) - } else { - return encodableError(fmt.Errorf("API LogError called but not implemented.")) - } - return nil -} - -type Z_LogWarnArgs struct { - A string - B []interface{} -} - -type Z_LogWarnReturns struct { -} - -func (g *apiRPCClient) LogWarn(msg string, keyValuePairs ...interface{}) { - _args := &Z_LogWarnArgs{msg, keyValuePairs} - _returns := &Z_LogWarnReturns{} - if err := g.client.Call("Plugin.LogWarn", _args, _returns); err != nil { - log.Printf("RPC call to LogWarn API failed: %s", err.Error()) - } - -} - -func (s *apiRPCServer) LogWarn(args *Z_LogWarnArgs, returns *Z_LogWarnReturns) error { - if hook, ok := s.impl.(interface { - LogWarn(msg string, keyValuePairs ...interface{}) - }); ok { - hook.LogWarn(args.A, args.B...) 
- } else { - return encodableError(fmt.Errorf("API LogWarn called but not implemented.")) - } - return nil -} - type Z_SendMailArgs struct { A string B string @@ -4186,3 +4577,272 @@ func (s *apiRPCServer) DeleteBotIconImage(args *Z_DeleteBotIconImageArgs, return } return nil } + +type Z_PublishUserTypingArgs struct { + A string + B string + C string +} + +type Z_PublishUserTypingReturns struct { + A *model.AppError +} + +func (g *apiRPCClient) PublishUserTyping(userId, channelId, parentId string) *model.AppError { + _args := &Z_PublishUserTypingArgs{userId, channelId, parentId} + _returns := &Z_PublishUserTypingReturns{} + if err := g.client.Call("Plugin.PublishUserTyping", _args, _returns); err != nil { + log.Printf("RPC call to PublishUserTyping API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) PublishUserTyping(args *Z_PublishUserTypingArgs, returns *Z_PublishUserTypingReturns) error { + if hook, ok := s.impl.(interface { + PublishUserTyping(userId, channelId, parentId string) *model.AppError + }); ok { + returns.A = hook.PublishUserTyping(args.A, args.B, args.C) + } else { + return encodableError(fmt.Errorf("API PublishUserTyping called but not implemented.")) + } + return nil +} + +type Z_CreateCommandArgs struct { + A *model.Command +} + +type Z_CreateCommandReturns struct { + A *model.Command + B error +} + +func (g *apiRPCClient) CreateCommand(cmd *model.Command) (*model.Command, error) { + _args := &Z_CreateCommandArgs{cmd} + _returns := &Z_CreateCommandReturns{} + if err := g.client.Call("Plugin.CreateCommand", _args, _returns); err != nil { + log.Printf("RPC call to CreateCommand API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) CreateCommand(args *Z_CreateCommandArgs, returns *Z_CreateCommandReturns) error { + if hook, ok := s.impl.(interface { + CreateCommand(cmd *model.Command) (*model.Command, error) + }); ok { + returns.A, returns.B = hook.CreateCommand(args.A) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API CreateCommand called but not implemented.")) + } + return nil +} + +type Z_ListCommandsArgs struct { + A string +} + +type Z_ListCommandsReturns struct { + A []*model.Command + B error +} + +func (g *apiRPCClient) ListCommands(teamID string) ([]*model.Command, error) { + _args := &Z_ListCommandsArgs{teamID} + _returns := &Z_ListCommandsReturns{} + if err := g.client.Call("Plugin.ListCommands", _args, _returns); err != nil { + log.Printf("RPC call to ListCommands API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) ListCommands(args *Z_ListCommandsArgs, returns *Z_ListCommandsReturns) error { + if hook, ok := s.impl.(interface { + ListCommands(teamID string) ([]*model.Command, error) + }); ok { + returns.A, returns.B = hook.ListCommands(args.A) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API ListCommands called but not implemented.")) + } + return nil +} + +type Z_ListCustomCommandsArgs struct { + A string +} + +type Z_ListCustomCommandsReturns struct { + A []*model.Command + B error +} + +func (g *apiRPCClient) ListCustomCommands(teamID string) ([]*model.Command, error) { + _args := &Z_ListCustomCommandsArgs{teamID} + _returns := &Z_ListCustomCommandsReturns{} + if err := g.client.Call("Plugin.ListCustomCommands", _args, _returns); err != nil { + log.Printf("RPC call to ListCustomCommands API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s 
*apiRPCServer) ListCustomCommands(args *Z_ListCustomCommandsArgs, returns *Z_ListCustomCommandsReturns) error { + if hook, ok := s.impl.(interface { + ListCustomCommands(teamID string) ([]*model.Command, error) + }); ok { + returns.A, returns.B = hook.ListCustomCommands(args.A) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API ListCustomCommands called but not implemented.")) + } + return nil +} + +type Z_ListPluginCommandsArgs struct { + A string +} + +type Z_ListPluginCommandsReturns struct { + A []*model.Command + B error +} + +func (g *apiRPCClient) ListPluginCommands(teamID string) ([]*model.Command, error) { + _args := &Z_ListPluginCommandsArgs{teamID} + _returns := &Z_ListPluginCommandsReturns{} + if err := g.client.Call("Plugin.ListPluginCommands", _args, _returns); err != nil { + log.Printf("RPC call to ListPluginCommands API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) ListPluginCommands(args *Z_ListPluginCommandsArgs, returns *Z_ListPluginCommandsReturns) error { + if hook, ok := s.impl.(interface { + ListPluginCommands(teamID string) ([]*model.Command, error) + }); ok { + returns.A, returns.B = hook.ListPluginCommands(args.A) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API ListPluginCommands called but not implemented.")) + } + return nil +} + +type Z_ListBuiltInCommandsArgs struct { +} + +type Z_ListBuiltInCommandsReturns struct { + A []*model.Command + B error +} + +func (g *apiRPCClient) ListBuiltInCommands() ([]*model.Command, error) { + _args := &Z_ListBuiltInCommandsArgs{} + _returns := &Z_ListBuiltInCommandsReturns{} + if err := g.client.Call("Plugin.ListBuiltInCommands", _args, _returns); err != nil { + log.Printf("RPC call to ListBuiltInCommands API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) ListBuiltInCommands(args *Z_ListBuiltInCommandsArgs, returns *Z_ListBuiltInCommandsReturns) error { + if hook, ok := s.impl.(interface { + ListBuiltInCommands() ([]*model.Command, error) + }); ok { + returns.A, returns.B = hook.ListBuiltInCommands() + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API ListBuiltInCommands called but not implemented.")) + } + return nil +} + +type Z_GetCommandArgs struct { + A string +} + +type Z_GetCommandReturns struct { + A *model.Command + B error +} + +func (g *apiRPCClient) GetCommand(commandID string) (*model.Command, error) { + _args := &Z_GetCommandArgs{commandID} + _returns := &Z_GetCommandReturns{} + if err := g.client.Call("Plugin.GetCommand", _args, _returns); err != nil { + log.Printf("RPC call to GetCommand API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) GetCommand(args *Z_GetCommandArgs, returns *Z_GetCommandReturns) error { + if hook, ok := s.impl.(interface { + GetCommand(commandID string) (*model.Command, error) + }); ok { + returns.A, returns.B = hook.GetCommand(args.A) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API GetCommand called but not implemented.")) + } + return nil +} + +type Z_UpdateCommandArgs struct { + A string + B *model.Command +} + +type Z_UpdateCommandReturns struct { + A *model.Command + B error +} + +func (g *apiRPCClient) UpdateCommand(commandID string, updatedCmd *model.Command) (*model.Command, error) { + _args := &Z_UpdateCommandArgs{commandID, updatedCmd} + _returns := &Z_UpdateCommandReturns{} + if err := 
g.client.Call("Plugin.UpdateCommand", _args, _returns); err != nil { + log.Printf("RPC call to UpdateCommand API failed: %s", err.Error()) + } + return _returns.A, _returns.B +} + +func (s *apiRPCServer) UpdateCommand(args *Z_UpdateCommandArgs, returns *Z_UpdateCommandReturns) error { + if hook, ok := s.impl.(interface { + UpdateCommand(commandID string, updatedCmd *model.Command) (*model.Command, error) + }); ok { + returns.A, returns.B = hook.UpdateCommand(args.A, args.B) + returns.B = encodableError(returns.B) + } else { + return encodableError(fmt.Errorf("API UpdateCommand called but not implemented.")) + } + return nil +} + +type Z_DeleteCommandArgs struct { + A string +} + +type Z_DeleteCommandReturns struct { + A error +} + +func (g *apiRPCClient) DeleteCommand(commandID string) error { + _args := &Z_DeleteCommandArgs{commandID} + _returns := &Z_DeleteCommandReturns{} + if err := g.client.Call("Plugin.DeleteCommand", _args, _returns); err != nil { + log.Printf("RPC call to DeleteCommand API failed: %s", err.Error()) + } + return _returns.A +} + +func (s *apiRPCServer) DeleteCommand(args *Z_DeleteCommandArgs, returns *Z_DeleteCommandReturns) error { + if hook, ok := s.impl.(interface { + DeleteCommand(commandID string) error + }); ok { + returns.A = hook.DeleteCommand(args.A) + returns.A = encodableError(returns.A) + } else { + return encodableError(fmt.Errorf("API DeleteCommand called but not implemented.")) + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/context.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/context.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/plugin/context.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/context.go index 288739e..98e786e 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/context.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/context.go @@ -12,4 +12,5 @@ type Context struct { IpAddress string AcceptLanguage string UserAgent string + SourcePluginId string } diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/doc.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/doc.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/plugin/doc.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/doc.go diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/environment.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/environment.go similarity index 62% rename from vendor/github.com/mattermost/mattermost-server/plugin/environment.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/environment.go index d301a89..c69be7f 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/environment.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/environment.go @@ -12,12 +12,16 @@ import ( "sync" "time" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils" "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v5/einterfaces" + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils" ) +var ErrNotFound = errors.New("Item not found") + type apiImplCreatorFunc func(*model.Manifest) API // registeredPlugin stores the state for a given plugin that has been activated @@ -27,11 +31,17 @@ type 
apiImplCreatorFunc func(*model.Manifest) API // plugin is configured as disabled and has not been activated during this server run. type registeredPlugin struct { BundleInfo *model.BundleInfo - State *int + State int + + supervisor *supervisor +} - failTimeStamps []time.Time - lastError error - supervisor *supervisor +// PrepackagedPlugin is a plugin prepackaged with the server and found on startup. +type PrepackagedPlugin struct { + Path string + IconData string + Manifest *model.Manifest + Signature []byte } // Environment represents the execution environment of active plugins. @@ -39,17 +49,21 @@ type registeredPlugin struct { // It is meant for use by the Mattermost server to manipulate, interact with and report on the set // of active plugins. type Environment struct { - registeredPlugins sync.Map - pluginHealthCheckJob *PluginHealthCheckJob - logger *mlog.Logger - newAPIImpl apiImplCreatorFunc - pluginDir string - webappPluginDir string + registeredPlugins sync.Map + pluginHealthCheckJob *PluginHealthCheckJob + logger *mlog.Logger + metrics einterfaces.MetricsInterface + newAPIImpl apiImplCreatorFunc + pluginDir string + webappPluginDir string + prepackagedPlugins []*PrepackagedPlugin + prepackagedPluginsLock sync.RWMutex } -func NewEnvironment(newAPIImpl apiImplCreatorFunc, pluginDir string, webappPluginDir string, logger *mlog.Logger) (*Environment, error) { +func NewEnvironment(newAPIImpl apiImplCreatorFunc, pluginDir string, webappPluginDir string, logger *mlog.Logger, metrics einterfaces.MetricsInterface) (*Environment, error) { return &Environment{ logger: logger, + metrics: metrics, newAPIImpl: newAPIImpl, pluginDir: pluginDir, webappPluginDir: webappPluginDir, @@ -73,7 +87,8 @@ func scanSearchPath(path string) ([]*model.BundleInfo, error) { if !file.IsDir() || file.Name()[0] == '.' { continue } - if info := model.BundleInfoForPath(filepath.Join(path, file.Name())); info.ManifestPath != "" { + info := model.BundleInfoForPath(filepath.Join(path, file.Name())) + if info.Manifest != nil { ret = append(ret, info) } } @@ -85,11 +100,21 @@ func (env *Environment) Available() ([]*model.BundleInfo, error) { return scanSearchPath(env.pluginDir) } +// Returns a list of prepackaged plugins available in the local prepackaged_plugins folder. +// The list content is immutable and should not be modified. +func (env *Environment) PrepackagedPlugins() []*PrepackagedPlugin { + env.prepackagedPluginsLock.RLock() + defer env.prepackagedPluginsLock.RUnlock() + + return env.prepackagedPlugins +} + // Returns a list of all currently active plugins within the environment. +// The returned list should not be modified. 
func (env *Environment) Active() []*model.BundleInfo { activePlugins := []*model.BundleInfo{} env.registeredPlugins.Range(func(key, value interface{}) bool { - plugin := value.(*registeredPlugin) + plugin := value.(registeredPlugin) if env.IsActive(plugin.BundleInfo.Manifest.Id) { activePlugins = append(activePlugins, plugin.BundleInfo) } @@ -112,20 +137,22 @@ func (env *Environment) GetPluginState(id string) int { return model.PluginStateNotRunning } - return *rp.(*registeredPlugin).State + return rp.(registeredPlugin).State } -// SetPluginState sets the current state of a plugin (disabled, running, or error) -func (env *Environment) SetPluginState(id string, state int) { +// setPluginState sets the current state of a plugin (disabled, running, or error) +func (env *Environment) setPluginState(id string, state int) { if rp, ok := env.registeredPlugins.Load(id); ok { - *rp.(*registeredPlugin).State = state + p := rp.(registeredPlugin) + p.State = state + env.registeredPlugins.Store(id, p) } } // PublicFilesPath returns a path and true if the plugin with the given id is active. // It returns an empty string and false if the path is not set or invalid func (env *Environment) PublicFilesPath(id string) (string, error) { - if _, ok := env.registeredPlugins.Load(id); !ok { + if !env.IsActive(id) { return "", fmt.Errorf("plugin not found: %v", id) } return filepath.Join(env.pluginDir, id, "public"), nil @@ -162,6 +189,23 @@ func (env *Environment) Statuses() (model.PluginStatuses, error) { return pluginStatuses, nil } +// GetManifest returns a manifest for a given pluginId. +// Returns ErrNotFound if plugin is not found. +func (env *Environment) GetManifest(pluginId string) (*model.Manifest, error) { + plugins, err := env.Available() + if err != nil { + return nil, errors.Wrap(err, "unable to get plugin statuses") + } + + for _, plugin := range plugins { + if plugin.Manifest != nil && plugin.Manifest.Id == pluginId { + return plugin.Manifest, nil + } + } + + return nil, ErrNotFound +} + func (env *Environment) Activate(id string) (manifest *model.Manifest, activated bool, reterr error) { // Check if we are already active if env.IsActive(id) { @@ -185,22 +229,14 @@ func (env *Environment) Activate(id string) (manifest *model.Manifest, activated return nil, false, fmt.Errorf("plugin not found: %v", id) } - value, ok := env.registeredPlugins.Load(id) - if !ok { - value = newRegisteredPlugin(pluginInfo) - env.registeredPlugins.Store(id, value) - } - - rp := value.(*registeredPlugin) - - // Store latest BundleInfo in case something has changed since last activation - rp.BundleInfo = pluginInfo + rp := newRegisteredPlugin(pluginInfo) + env.registeredPlugins.Store(id, rp) defer func() { if reterr == nil { - env.SetPluginState(id, model.PluginStateRunning) + env.setPluginState(id, model.PluginStateRunning) } else { - env.SetPluginState(id, model.PluginStateFailedToStart) + env.setPluginState(id, model.PluginStateFailedToStart) } }() @@ -227,11 +263,17 @@ func (env *Environment) Activate(id string) (manifest *model.Manifest, activated } if pluginInfo.Manifest.HasServer() { - sup, err := newSupervisor(pluginInfo, env.logger, env.newAPIImpl(pluginInfo.Manifest)) + sup, err := newSupervisor(pluginInfo, env.newAPIImpl(pluginInfo.Manifest), env.logger, env.metrics) if err != nil { return nil, false, errors.Wrapf(err, "unable to start plugin: %v", id) } + + if err := sup.Hooks().OnActivate(); err != nil { + sup.Shutdown() + return nil, false, err + } rp.supervisor = sup + env.registeredPlugins.Store(id, rp) 
componentActivated = true } @@ -258,13 +300,13 @@ func (env *Environment) Deactivate(id string) bool { isActive := env.IsActive(id) - env.SetPluginState(id, model.PluginStateNotRunning) + env.setPluginState(id, model.PluginStateNotRunning) if !isActive { return false } - rp := p.(*registeredPlugin) + rp := p.(registeredPlugin) if rp.supervisor != nil { if err := rp.supervisor.Hooks().OnDeactivate(); err != nil { env.logger.Error("Plugin OnDeactivate() error", mlog.String("plugin_id", rp.BundleInfo.Manifest.Id), mlog.Err(err)) @@ -284,16 +326,46 @@ func (env *Environment) RestartPlugin(id string) error { // Shutdown deactivates all plugins and gracefully shuts down the environment. func (env *Environment) Shutdown() { + if env.pluginHealthCheckJob != nil { + env.pluginHealthCheckJob.Cancel() + } + + var wg sync.WaitGroup env.registeredPlugins.Range(func(key, value interface{}) bool { - rp := value.(*registeredPlugin) + rp := value.(registeredPlugin) + + if rp.supervisor == nil || !env.IsActive(rp.BundleInfo.Manifest.Id) { + return true + } + + wg.Add(1) - if rp.supervisor != nil { + done := make(chan bool) + go func() { + defer close(done) if err := rp.supervisor.Hooks().OnDeactivate(); err != nil { env.logger.Error("Plugin OnDeactivate() error", mlog.String("plugin_id", rp.BundleInfo.Manifest.Id), mlog.Err(err)) } + }() + + go func() { + defer wg.Done() + + select { + case <-time.After(10 * time.Second): + env.logger.Warn("Plugin OnDeactivate() failed to complete in 10 seconds", mlog.String("plugin_id", rp.BundleInfo.Manifest.Id)) + case <-done: + } + rp.supervisor.Shutdown() - } + }() + + return true + }) + wg.Wait() + + env.registeredPlugins.Range(func(key, value interface{}) bool { env.registeredPlugins.Delete(key) return true @@ -362,8 +434,8 @@ func (env *Environment) UnpackWebappBundle(id string) (*model.Manifest, error) { // Consider using RunMultiPluginHook instead. func (env *Environment) HooksForPlugin(id string) (Hooks, error) { if p, ok := env.registeredPlugins.Load(id); ok { - rp := p.(*registeredPlugin) - if rp.supervisor != nil { + rp := p.(registeredPlugin) + if rp.supervisor != nil && env.IsActive(id) { return rp.supervisor.Hooks(), nil } } @@ -371,26 +443,85 @@ func (env *Environment) HooksForPlugin(id string) (Hooks, error) { return nil, fmt.Errorf("plugin not found: %v", id) } -// RunMultiPluginHook invokes hookRunnerFunc for each plugin that implements the given hookId. +// RunMultiPluginHook invokes hookRunnerFunc for each active plugin that implements the given hookId. // // If hookRunnerFunc returns false, iteration will not continue. The iteration order among active // plugins is not specified. 
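// Editor's note, not part of the vendored patch: an illustrative sketch of how
// server code is expected to fan a hook out to every active plugin via
// RunMultiPluginHook (defined just below). MessageHasBeenPostedId is assumed to
// be one of the hook ID constants declared alongside the generated hook glue.
func notifyMessageHasBeenPosted(env *Environment, c *Context, post *model.Post) {
	env.RunMultiPluginHook(func(hooks Hooks) bool {
		hooks.MessageHasBeenPosted(c, post)
		return true // returning false would stop iteration over the remaining plugins
	}, MessageHasBeenPostedId)
}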
func (env *Environment) RunMultiPluginHook(hookRunnerFunc func(hooks Hooks) bool, hookId int) { + startTime := time.Now() + env.registeredPlugins.Range(func(key, value interface{}) bool { - rp := value.(*registeredPlugin) + rp := value.(registeredPlugin) - if rp.supervisor == nil || !rp.supervisor.Implements(hookId) { + if rp.supervisor == nil || !rp.supervisor.Implements(hookId) || !env.IsActive(rp.BundleInfo.Manifest.Id) { return true } - if !hookRunnerFunc(rp.supervisor.Hooks()) { - return false + + hookStartTime := time.Now() + result := hookRunnerFunc(rp.supervisor.Hooks()) + + if env.metrics != nil { + elapsedTime := float64(time.Since(hookStartTime)) / float64(time.Second) + env.metrics.ObservePluginMultiHookIterationDuration(rp.BundleInfo.Manifest.Id, elapsedTime) } - return true + return result }) + + if env.metrics != nil { + elapsedTime := float64(time.Since(startTime)) / float64(time.Second) + env.metrics.ObservePluginMultiHookDuration(elapsedTime) + } +} + +// PerformHealthCheck uses the active plugin's supervisor to verify if the plugin has crashed. +func (env *Environment) PerformHealthCheck(id string) error { + p, ok := env.registeredPlugins.Load(id) + if !ok { + return nil + } + rp := p.(registeredPlugin) + + sup := rp.supervisor + if sup == nil { + return nil + } + return sup.PerformHealthCheck() +} + +// SetPrepackagedPlugins saves prepackaged plugins in the environment. +func (env *Environment) SetPrepackagedPlugins(plugins []*PrepackagedPlugin) { + env.prepackagedPluginsLock.Lock() + env.prepackagedPlugins = plugins + env.prepackagedPluginsLock.Unlock() } -func newRegisteredPlugin(bundle *model.BundleInfo) *registeredPlugin { +func newRegisteredPlugin(bundle *model.BundleInfo) registeredPlugin { state := model.PluginStateNotRunning - return ®isteredPlugin{failTimeStamps: []time.Time{}, State: &state, BundleInfo: bundle} + return registeredPlugin{State: state, BundleInfo: bundle} +} + +// InitPluginHealthCheckJob starts a new job if one is not running and is set to enabled, or kills an existing one if set to disabled. +func (env *Environment) InitPluginHealthCheckJob(enable bool) { + // Config is set to enable. No job exists, start a new job. + if enable && env.pluginHealthCheckJob == nil { + mlog.Debug("Enabling plugin health check job", mlog.Duration("interval_s", HealthCheckInterval)) + + job := newPluginHealthCheckJob(env) + env.pluginHealthCheckJob = job + go job.run() + } + + // Config is set to disable. Job exists, kill existing job. + if !enable && env.pluginHealthCheckJob != nil { + mlog.Debug("Disabling plugin health check job") + + env.pluginHealthCheckJob.Cancel() + env.pluginHealthCheckJob = nil + } +} + +// GetPluginHealthCheckJob returns the configured PluginHealthCheckJob, if any. 
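// Editor's note, not part of the vendored patch: a sketch of how a caller might
// toggle the plugin health check job via InitPluginHealthCheckJob (added above)
// when the server configuration changes. The PluginSettings.EnableHealthCheck
// field is an assumption about the v5 config model.
func applyHealthCheckSetting(env *Environment, cfg *model.Config) {
	enable := cfg.PluginSettings.EnableHealthCheck != nil && *cfg.PluginSettings.EnableHealthCheck
	env.InitPluginHealthCheckJob(enable)
}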
+func (env *Environment) GetPluginHealthCheckJob() *PluginHealthCheckJob { + return env.pluginHealthCheckJob } diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/hclog_adapter.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hclog_adapter.go similarity index 77% rename from vendor/github.com/mattermost/mattermost-server/plugin/hclog_adapter.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/hclog_adapter.go index b67b839..e79cee2 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/hclog_adapter.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hclog_adapter.go @@ -10,7 +10,8 @@ import ( "strings" "github.com/hashicorp/go-hclog" - "github.com/mattermost/mattermost-server/mlog" + + "github.com/mattermost/mattermost-server/v5/mlog" ) type hclogAdapter struct { @@ -18,6 +19,24 @@ type hclogAdapter struct { extrasKey string } +func (h *hclogAdapter) Log(level hclog.Level, msg string, args ...interface{}) { + switch level { + case hclog.Trace: + h.Trace(msg, args...) + case hclog.Debug: + h.Debug(msg, args...) + case hclog.Info: + h.Info(msg, args...) + case hclog.Warn: + h.Warn(msg, args...) + case hclog.Error: + h.Error(msg, args...) + default: + // For unknown/unexpected log level, treat it as an error so we notice and fix the code. + h.Error(msg, args...) + } +} + func (h *hclogAdapter) Trace(msg string, args ...interface{}) { extras := strings.TrimSpace(fmt.Sprint(args...)) if extras != "" { @@ -104,3 +123,11 @@ func (h *hclogAdapter) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writ } func (h *hclogAdapter) SetLevel(hclog.Level) {} + +func (h *hclogAdapter) ImpliedArgs() []interface{} { + return []interface{}{} +} + +func (h *hclogAdapter) Name() string { + return "MattermostPluginLogger" +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/health_check.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/health_check.go new file mode 100644 index 0000000..36b4ab8 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/health_check.go @@ -0,0 +1,124 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "sync" + "time" + + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" +) + +const ( + HealthCheckInterval = 30 * time.Second // How often the health check should run + HealthCheckDeactivationWindow = 60 * time.Minute // How long we wait for num fails to occur before deactivating the plugin + HealthCheckPingFailLimit = 3 // How many times we call RPC ping in a row before it is considered a failure + HealthCheckNumRestartsLimit = 3 // How many times we restart a plugin before we deactivate it +) + +type PluginHealthCheckJob struct { + cancel chan struct{} + cancelled chan struct{} + cancelOnce sync.Once + env *Environment + failureTimestamps sync.Map +} + +// run continuously performs health checks on all active plugins, on a timer. +func (job *PluginHealthCheckJob) run() { + mlog.Debug("Plugin health check job starting.") + defer close(job.cancelled) + + ticker := time.NewTicker(HealthCheckInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + activePlugins := job.env.Active() + for _, plugin := range activePlugins { + job.CheckPlugin(plugin.Manifest.Id) + } + case <-job.cancel: + return + } + } +} + +// CheckPlugin determines the plugin's health status, then handles the error or success case. 
+// If the plugin passes the health check, do nothing. +// If the plugin fails the health check, the function either restarts or deactivates the plugin, based on the quantity and frequency of its failures. +func (job *PluginHealthCheckJob) CheckPlugin(id string) { + err := job.env.PerformHealthCheck(id) + if err == nil { + return + } + + mlog.Warn("Health check failed for plugin", mlog.String("id", id), mlog.Err(err)) + timestamps := job.getStoredTimestamps(id) + timestamps = append(timestamps, time.Now()) + + if shouldDeactivatePlugin(timestamps) { + // Order matters here, must deactivate first and then set plugin state + mlog.Debug("Deactivating plugin due to multiple crashes", mlog.String("id", id)) + job.env.Deactivate(id) + + // Reset timestamp state for this plugin + job.failureTimestamps.Delete(id) + job.env.setPluginState(id, model.PluginStateFailedToStayRunning) + } else { + mlog.Debug("Restarting plugin due to failed health check", mlog.String("id", id)) + if err := job.env.RestartPlugin(id); err != nil { + mlog.Error("Failed to restart plugin", mlog.String("id", id), mlog.Err(err)) + } + + // Store this failure so we can continue to monitor the plugin + job.failureTimestamps.Store(id, removeStaleTimestamps(timestamps)) + } +} + +// getStoredTimestamps returns the stored failure timestamps for a plugin. +func (job *PluginHealthCheckJob) getStoredTimestamps(id string) []time.Time { + timestamps, ok := job.failureTimestamps.Load(id) + if !ok { + timestamps = []time.Time{} + } + return timestamps.([]time.Time) +} + +func newPluginHealthCheckJob(env *Environment) *PluginHealthCheckJob { + return &PluginHealthCheckJob{ + cancel: make(chan struct{}), + cancelled: make(chan struct{}), + env: env, + } +} + +func (job *PluginHealthCheckJob) Cancel() { + job.cancelOnce.Do(func() { + close(job.cancel) + }) + <-job.cancelled +} + +// shouldDeactivatePlugin determines if a plugin needs to be deactivated after the plugin has failed (HealthCheckNumRestartsLimit) times, +// within the configured time window (HealthCheckDeactivationWindow). +func shouldDeactivatePlugin(failedTimestamps []time.Time) bool { + if len(failedTimestamps) < HealthCheckNumRestartsLimit { + return false + } + + index := len(failedTimestamps) - HealthCheckNumRestartsLimit + return time.Since(failedTimestamps[index]) <= HealthCheckDeactivationWindow +} + +// removeStaleTimestamps only keeps the last HealthCheckNumRestartsLimit items in timestamps. +func removeStaleTimestamps(timestamps []time.Time) []time.Time { + if len(timestamps) > HealthCheckNumRestartsLimit { + timestamps = timestamps[len(timestamps)-HealthCheckNumRestartsLimit:] + } + + return timestamps +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers.go new file mode 100644 index 0000000..914fc2e --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers.go @@ -0,0 +1,97 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "github.com/mattermost/mattermost-server/v5/model" +) + +// Helpers provide a common patterns plugins use. +// +// Plugins obtain access to the Helpers by embedding MattermostPlugin. +type Helpers interface { + // EnsureBot either returns an existing bot user matching the given bot, or creates a bot user from the given bot. 
+ // A profile image or icon image may be optionally passed in to be set for the existing or newly created bot. + // Returns the id of the resulting bot. + // + // Minimum server version: 5.10 + EnsureBot(bot *model.Bot, options ...EnsureBotOption) (string, error) + + // KVSetJSON stores a key-value pair, unique per plugin, marshalling the given value as a JSON string. + // + // Deprecated: Use p.API.KVSetWithOptions instead. + // + // Minimum server version: 5.2 + KVSetJSON(key string, value interface{}) error + + // KVCompareAndSetJSON updates a key-value pair, unique per plugin, but only if the current value matches the given oldValue after marshalling as a JSON string. + // Inserts a new key if oldValue == nil. + // Returns (false, err) if DB error occurred + // Returns (false, nil) if current value != oldValue or key already exists when inserting + // Returns (true, nil) if current value == oldValue or new key is inserted + // + // Deprecated: Use p.API.KVSetWithOptions instead. + // + // Minimum server version: 5.12 + KVCompareAndSetJSON(key string, oldValue interface{}, newValue interface{}) (bool, error) + + // KVCompareAndDeleteJSON deletes a key-value pair, unique per plugin, but only if the current value matches the given oldValue after marshalling as a JSON string. + // Returns (false, err) if DB error occurred + // Returns (false, nil) if current value != oldValue or the key was already deleted + // Returns (true, nil) if current value == oldValue + // + // Minimum server version: 5.16 + KVCompareAndDeleteJSON(key string, oldValue interface{}) (bool, error) + + // KVGetJSON retrieves a value based on the key, unique per plugin, unmarshalling the previously set JSON string into the given value. Returns true if the key exists. + // + // Minimum server version: 5.2 + KVGetJSON(key string, value interface{}) (bool, error) + + // KVSetWithExpiryJSON stores a key-value pair with an expiry time, unique per plugin, marshalling the given value as a JSON string. + // + // Deprecated: Use p.API.KVSetWithOptions instead. + // + // Minimum server version: 5.6 + KVSetWithExpiryJSON(key string, value interface{}, expireInSeconds int64) error + + // KVListWithOptions returns all keys that match the given options. If no options are provided then all keys are returned. + // + // Minimum server version: 5.6 + KVListWithOptions(options ...KVListOption) ([]string, error) + + // CheckRequiredServerConfiguration checks if the server is configured according to + // plugin requirements. + // + // Minimum server version: 5.2 + CheckRequiredServerConfiguration(req *model.Config) (bool, error) + + // ShouldProcessMessage returns if the message should be processed by a message hook. + // + // Use this method to avoid processing unnecessary messages in a MessageHasBeenPosted + // or MessageWillBePosted hook, and indeed in some cases avoid an infinite loop between + // two automated bots or plugins. + // + // The behaviour is customizable using the given options, since plugin needs may vary. + // By default, system messages and messages from bots will be skipped. + // + // Minimum server version: 5.2 + ShouldProcessMessage(post *model.Post, options ...ShouldProcessMessageOption) (bool, error) + + // InstallPluginFromURL installs the plugin from the provided url. + // + // Minimum server version: 5.18 + InstallPluginFromURL(downloadURL string, replace bool) (*model.Manifest, error) + + // GetPluginAssetURL builds a URL to the given asset in the assets directory. 
+ // Use this URL to link to assets from the webapp, or for third-party integrations with your plugin. + // + // Minimum server version: 5.2 + GetPluginAssetURL(pluginID, asset string) (string, error) +} + +// HelpersImpl implements the helpers interface with an API that retrieves data on behalf of the plugin. +type HelpersImpl struct { + API API +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_bots.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_bots.go new file mode 100644 index 0000000..ed30cb6 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_bots.go @@ -0,0 +1,299 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "io/ioutil" + "path/filepath" + + "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils" +) + +type ensureBotOptions struct { + ProfileImagePath string + IconImagePath string +} + +type EnsureBotOption func(*ensureBotOptions) + +func ProfileImagePath(path string) EnsureBotOption { + return func(args *ensureBotOptions) { + args.ProfileImagePath = path + } +} + +func IconImagePath(path string) EnsureBotOption { + return func(args *ensureBotOptions) { + args.IconImagePath = path + } +} + +// EnsureBot implements Helpers.EnsureBot +func (p *HelpersImpl) EnsureBot(bot *model.Bot, options ...EnsureBotOption) (retBotID string, retErr error) { + err := p.ensureServerVersion("5.10.0") + if err != nil { + return "", errors.Wrap(err, "failed to ensure bot") + } + + // Default options + o := &ensureBotOptions{ + ProfileImagePath: "", + IconImagePath: "", + } + + for _, setter := range options { + setter(o) + } + + botID, err := p.ensureBot(bot) + if err != nil { + return "", err + } + + err = p.setBotImages(botID, o.ProfileImagePath, o.IconImagePath) + if err != nil { + return "", err + } + return botID, nil +} + +type ShouldProcessMessageOption func(*shouldProcessMessageOptions) + +type shouldProcessMessageOptions struct { + AllowSystemMessages bool + AllowBots bool + AllowWebhook bool + FilterChannelIDs []string + FilterUserIDs []string + OnlyBotDMs bool +} + +// AllowSystemMessages configures a call to ShouldProcessMessage to return true for system messages. +// +// As it is typically desirable only to consume messages from users of the system, ShouldProcessMessage ignores system messages by default. +func AllowSystemMessages() ShouldProcessMessageOption { + return func(options *shouldProcessMessageOptions) { + options.AllowSystemMessages = true + } +} + +// AllowBots configures a call to ShouldProcessMessage to return true for bot posts. +// +// As it is typically desirable only to consume messages from human users of the system, ShouldProcessMessage ignores bot messages by default. When allowing bots, take care to avoid a loop where two plugins respond to each others posts repeatedly. +func AllowBots() ShouldProcessMessageOption { + return func(options *shouldProcessMessageOptions) { + options.AllowBots = true + } +} + +// AllowWebhook configures a call to ShouldProcessMessage to return true for posts from webhook. +// +// As it is typically desirable only to consume messages from human users of the system, ShouldProcessMessage ignores webhook messages by default. 
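// Editor's note, not part of the vendored patch: an illustrative plugin-side
// sketch of the helpers defined in this file. MyPlugin, its botID/channelIDs
// fields, the bot username and the asset path are hypothetical; MyPlugin is
// assumed to embed MattermostPlugin, which provides the Helpers field.
func (p *MyPlugin) OnActivate() error {
	botID, err := p.Helpers.EnsureBot(&model.Bot{
		Username:    "examplebot",
		DisplayName: "Example Bot",
	}, ProfileImagePath("assets/profile.png"))
	if err != nil {
		return err
	}
	p.botID = botID
	return nil
}

func (p *MyPlugin) MessageHasBeenPosted(c *Context, post *model.Post) {
	// Ignore system messages, bot posts and channels the plugin is not watching.
	ok, err := p.Helpers.ShouldProcessMessage(post, FilterChannelIDs(p.channelIDs))
	if err != nil || !ok {
		return
	}
	// ... handle the post ...
}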
+func AllowWebhook() ShouldProcessMessageOption { + return func(options *shouldProcessMessageOptions) { + options.AllowWebhook = true + } +} + +// FilterChannelIDs configures a call to ShouldProcessMessage to return true only for the given channels. +// +// By default, posts from all channels are allowed to be processed. +func FilterChannelIDs(filterChannelIDs []string) ShouldProcessMessageOption { + return func(options *shouldProcessMessageOptions) { + options.FilterChannelIDs = filterChannelIDs + } +} + +// FilterUserIDs configures a call to ShouldProcessMessage to return true only for the given users. +// +// By default, posts from all non-bot users are allowed. +func FilterUserIDs(filterUserIDs []string) ShouldProcessMessageOption { + return func(options *shouldProcessMessageOptions) { + options.FilterUserIDs = filterUserIDs + } +} + +// OnlyBotDMs configures a call to ShouldProcessMessage to return true only for direct messages sent to the bot created by EnsureBot. +// +// By default, posts from all channels are allowed. +func OnlyBotDMs() ShouldProcessMessageOption { + return func(options *shouldProcessMessageOptions) { + options.OnlyBotDMs = true + } +} + +// ShouldProcessMessage implements Helpers.ShouldProcessMessage +func (p *HelpersImpl) ShouldProcessMessage(post *model.Post, options ...ShouldProcessMessageOption) (bool, error) { + messageProcessOptions := &shouldProcessMessageOptions{} + for _, option := range options { + option(messageProcessOptions) + } + + botIDBytes, kvGetErr := p.API.KVGet(BotUserKey) + if kvGetErr != nil { + return false, errors.Wrap(kvGetErr, "failed to get bot") + } + + if botIDBytes != nil { + if post.UserId == string(botIDBytes) { + return false, nil + } + } + + if post.IsSystemMessage() && !messageProcessOptions.AllowSystemMessages { + return false, nil + } + + if !messageProcessOptions.AllowWebhook && post.GetProp("from_webhook") == "true" { + return false, nil + } + + if !messageProcessOptions.AllowBots { + user, appErr := p.API.GetUser(post.UserId) + if appErr != nil { + return false, errors.Wrap(appErr, "unable to get user") + } + + if user.IsBot { + return false, nil + } + } + + if len(messageProcessOptions.FilterChannelIDs) != 0 && !utils.StringInSlice(post.ChannelId, messageProcessOptions.FilterChannelIDs) { + return false, nil + } + + if len(messageProcessOptions.FilterUserIDs) != 0 && !utils.StringInSlice(post.UserId, messageProcessOptions.FilterUserIDs) { + return false, nil + } + + if botIDBytes != nil && messageProcessOptions.OnlyBotDMs { + channel, appErr := p.API.GetChannel(post.ChannelId) + if appErr != nil { + return false, errors.Wrap(appErr, "unable to get channel") + } + + if !model.IsBotDMChannel(channel, string(botIDBytes)) { + return false, nil + } + } + + return true, nil +} + +func (p *HelpersImpl) readFile(path string) ([]byte, error) { + bundlePath, err := p.API.GetBundlePath() + if err != nil { + return nil, errors.Wrap(err, "failed to get bundle path") + } + + imageBytes, err := ioutil.ReadFile(filepath.Join(bundlePath, path)) + if err != nil { + return nil, errors.Wrap(err, "failed to read image") + } + return imageBytes, nil +} + +func (p *HelpersImpl) ensureBot(bot *model.Bot) (retBotID string, retErr error) { + // Must provide a bot with a username + if bot == nil || len(bot.Username) < 1 { + return "", errors.New("passed a bad bot, nil or no username") + } + + // If we fail for any reason, this could be a race between creation of bot and + // retrieval from another EnsureBot. 
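To illustrate how the filtering options above are meant to be combined, here is a sketch of a MessageHasBeenPosted hook that only reacts to human-authored direct messages with the plugin's bot; the Plugin struct, botUserID field, and reply text are hypothetical.

package root

import (
    "github.com/mattermost/mattermost-server/v5/model"
    "github.com/mattermost/mattermost-server/v5/plugin"
)

// Plugin is a hypothetical plugin; embedding MattermostPlugin provides p.API and p.Helpers.
type Plugin struct {
    plugin.MattermostPlugin

    botUserID string // assumed to be set from EnsureBot during OnActivate
}

// MessageHasBeenPosted replies only to direct messages sent to the plugin's bot by human users.
func (p *Plugin) MessageHasBeenPosted(c *plugin.Context, post *model.Post) {
    ok, err := p.Helpers.ShouldProcessMessage(post, plugin.OnlyBotDMs())
    if err != nil {
        p.API.LogError("failed to check whether the post should be processed", "err", err.Error())
        return
    }
    if !ok {
        return
    }

    reply := &model.Post{
        UserId:    p.botUserID,
        ChannelId: post.ChannelId,
        Message:   "Thanks, got it!",
    }
    if _, appErr := p.API.CreatePost(reply); appErr != nil {
        p.API.LogError("failed to reply to the direct message", "err", appErr.Error())
    }
}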
Just try the basic retrieve existing again. + defer func() { + if retBotID == "" || retErr != nil { + var err error + var botIDBytes []byte + + err = utils.ProgressiveRetry(func() error { + botIDBytes, err = p.API.KVGet(BotUserKey) + if err != nil { + return err + } + return nil + }) + + if err == nil && botIDBytes != nil { + retBotID = string(botIDBytes) + retErr = nil + } + } + }() + + botIDBytes, kvGetErr := p.API.KVGet(BotUserKey) + if kvGetErr != nil { + return "", errors.Wrap(kvGetErr, "failed to get bot") + } + + // If the bot has already been created, use it + if botIDBytes != nil { + botID := string(botIDBytes) + + // ensure existing bot is synced with what is being created + botPatch := &model.BotPatch{ + Username: &bot.Username, + DisplayName: &bot.DisplayName, + Description: &bot.Description, + } + + if _, err := p.API.PatchBot(botID, botPatch); err != nil { + return "", errors.Wrap(err, "failed to patch bot") + } + + return botID, nil + } + + // Check for an existing bot user with that username. If one exists, then use that. + if user, userGetErr := p.API.GetUserByUsername(bot.Username); userGetErr == nil && user != nil { + if user.IsBot { + if kvSetErr := p.API.KVSet(BotUserKey, []byte(user.Id)); kvSetErr != nil { + p.API.LogWarn("Failed to set claimed bot user id.", "userid", user.Id, "err", kvSetErr) + } + } else { + p.API.LogError("Plugin attempted to use an account that already exists. Convert user to a bot account in the CLI by running 'mattermost user convert --bot'. If the user is an existing user account you want to preserve, change its username and restart the Mattermost server, after which the plugin will create a bot account with that name. For more information about bot accounts, see https://mattermost.com/pl/default-bot-accounts", "username", bot.Username, "user_id", user.Id) + } + return user.Id, nil + } + + // Create a new bot user for the plugin + createdBot, createBotErr := p.API.CreateBot(bot) + if createBotErr != nil { + return "", errors.Wrap(createBotErr, "failed to create bot") + } + + if kvSetErr := p.API.KVSet(BotUserKey, []byte(createdBot.UserId)); kvSetErr != nil { + p.API.LogWarn("Failed to set created bot user id.", "userid", createdBot.UserId, "err", kvSetErr) + } + + return createdBot.UserId, nil +} + +func (p *HelpersImpl) setBotImages(botID, profileImagePath, iconImagePath string) error { + if profileImagePath != "" { + imageBytes, err := p.readFile(profileImagePath) + if err != nil { + return errors.Wrap(err, "failed to read profile image") + } + appErr := p.API.SetProfileImage(botID, imageBytes) + if appErr != nil { + return errors.Wrap(appErr, "failed to set profile image") + } + } + if iconImagePath != "" { + imageBytes, err := p.readFile(iconImagePath) + if err != nil { + return errors.Wrap(err, "failed to read icon image") + } + appErr := p.API.SetBotIconImage(botID, imageBytes) + if appErr != nil { + return errors.Wrap(appErr, "failed to set icon image") + } + } + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_config.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_config.go new file mode 100644 index 0000000..512fbbd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_config.go @@ -0,0 +1,32 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
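The EnsureBot helper and its image options above are typically called once from OnActivate. A minimal sketch, assuming a hypothetical Plugin struct and bundled asset paths:

package root

import (
    "github.com/pkg/errors"

    "github.com/mattermost/mattermost-server/v5/model"
    "github.com/mattermost/mattermost-server/v5/plugin"
)

// Plugin is a hypothetical plugin; embedding MattermostPlugin provides p.API and p.Helpers.
type Plugin struct {
    plugin.MattermostPlugin

    botUserID string
}

// OnActivate creates or reuses the plugin's bot account and remembers its user ID.
func (p *Plugin) OnActivate() error {
    botID, err := p.Helpers.EnsureBot(&model.Bot{
        Username:    "examplebot",
        DisplayName: "Example Bot",
        Description: "Created by the example plugin.",
    },
        plugin.ProfileImagePath("assets/profile.png"), // resolved relative to the plugin bundle
        plugin.IconImagePath("assets/icon.svg"),
    )
    if err != nil {
        return errors.Wrap(err, "failed to ensure the bot account")
    }

    p.botUserID = botID
    return nil
}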
+ +package plugin + +import ( + "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils" +) + +// CheckRequiredServerConfiguration implements Helpers.CheckRequiredServerConfiguration +func (p *HelpersImpl) CheckRequiredServerConfiguration(req *model.Config) (bool, error) { + if req == nil { + return true, nil + } + + cfg := p.API.GetConfig() + + mc, err := utils.Merge(cfg, req, nil) + if err != nil { + return false, errors.Wrap(err, "could not merge configurations") + } + + mergedCfg := mc.(model.Config) + if mergedCfg.ToJson() != cfg.ToJson() { + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_kv.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_kv.go new file mode 100644 index 0000000..890e812 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_kv.go @@ -0,0 +1,222 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "encoding/json" + "strings" + + "github.com/pkg/errors" +) + +// KVSetJSON implements Helpers.KVSetJSON. +func (p *HelpersImpl) KVSetJSON(key string, value interface{}) error { + err := p.ensureServerVersion("5.2.0") + if err != nil { + return err + } + + data, err := json.Marshal(value) + if err != nil { + return err + } + + appErr := p.API.KVSet(key, data) + if appErr != nil { + return appErr + } + + return nil +} + +// KVCompareAndSetJSON implements Helpers.KVCompareAndSetJSON. +func (p *HelpersImpl) KVCompareAndSetJSON(key string, oldValue interface{}, newValue interface{}) (bool, error) { + var err error + + err = p.ensureServerVersion("5.12.0") + if err != nil { + return false, err + } + var oldData, newData []byte + + if oldValue != nil { + oldData, err = json.Marshal(oldValue) + if err != nil { + return false, errors.Wrap(err, "unable to marshal old value") + } + } + + if newValue != nil { + newData, err = json.Marshal(newValue) + if err != nil { + return false, errors.Wrap(err, "unable to marshal new value") + } + } + + set, appErr := p.API.KVCompareAndSet(key, oldData, newData) + if appErr != nil { + return set, appErr + } + + return set, nil +} + +// KVCompareAndDeleteJSON implements Helpers.KVCompareAndDeleteJSON. +func (p *HelpersImpl) KVCompareAndDeleteJSON(key string, oldValue interface{}) (bool, error) { + var err error + + err = p.ensureServerVersion("5.16.0") + if err != nil { + return false, err + } + + var oldData []byte + + if oldValue != nil { + oldData, err = json.Marshal(oldValue) + if err != nil { + return false, errors.Wrap(err, "unable to marshal old value") + } + } + + deleted, appErr := p.API.KVCompareAndDelete(key, oldData) + if appErr != nil { + return deleted, appErr + } + + return deleted, nil +} + +// KVGetJSON implements Helpers.KVGetJSON. +func (p *HelpersImpl) KVGetJSON(key string, value interface{}) (bool, error) { + err := p.ensureServerVersion("5.2.0") + if err != nil { + return false, err + } + + data, appErr := p.API.KVGet(key) + if appErr != nil { + return false, appErr + } + if data == nil { + return false, nil + } + + err = json.Unmarshal(data, value) + if err != nil { + return false, err + } + + return true, nil +} + +// KVSetWithExpiryJSON is a wrapper around KVSetWithExpiry to simplify atomically writing a JSON object with expiry to the key value store. 
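KVCompareAndSetJSON above is the building block for optimistic updates. A sketch of a retry loop that increments a JSON-encoded counter, assuming a hypothetical counter key; a nil oldValue inserts the key when it does not exist yet:

package root

import (
    "github.com/pkg/errors"

    "github.com/mattermost/mattermost-server/v5/plugin"
)

// incrementCounter bumps an integer stored as JSON, retrying if another writer races us.
func incrementCounter(helpers plugin.Helpers, key string) (int, error) {
    for attempt := 0; attempt < 5; attempt++ {
        var current int
        found, err := helpers.KVGetJSON(key, &current)
        if err != nil {
            return 0, err
        }

        var oldValue interface{}
        if found {
            oldValue = current // must match the stored JSON for the swap to succeed
        }

        next := current + 1
        ok, err := helpers.KVCompareAndSetJSON(key, oldValue, next)
        if err != nil {
            return 0, err
        }
        if ok {
            return next, nil
        }
        // Another writer updated the key first; re-read and try again.
    }
    return 0, errors.New("gave up incrementing the counter after repeated conflicts")
}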
+func (p *HelpersImpl) KVSetWithExpiryJSON(key string, value interface{}, expireInSeconds int64) error { + err := p.ensureServerVersion("5.6.0") + if err != nil { + return err + } + + data, err := json.Marshal(value) + if err != nil { + return err + } + + appErr := p.API.KVSetWithExpiry(key, data, expireInSeconds) + if appErr != nil { + return appErr + } + + return nil +} + +type kvListOptions struct { + checkers []func(key string) (keep bool, err error) +} + +func (o *kvListOptions) checkAll(key string) (keep bool, err error) { + for _, check := range o.checkers { + keep, err := check(key) + if err != nil { + return false, err + } + if !keep { + return false, nil + } + } + + // key made it through all checkers + return true, nil +} + +// KVListOption represents a single input option for KVListWithOptions +type KVListOption func(*kvListOptions) + +// WithPrefix only return keys that start with the given string. +func WithPrefix(prefix string) KVListOption { + return WithChecker(func(key string) (keep bool, err error) { + return strings.HasPrefix(key, prefix), nil + }) +} + +// WithChecker allows for a custom filter function to determine which keys to return. +// Returning true will keep the key and false will filter it out. Returning an error +// will halt KVListWithOptions immediately and pass the error up (with no other results). +func WithChecker(f func(key string) (keep bool, err error)) KVListOption { + return func(args *kvListOptions) { + args.checkers = append(args.checkers, f) + } +} + +// kvListPerPage is the number of keys KVListWithOptions gets per request +const kvListPerPage = 100 + +// KVListWithOptions implements Helpers.KVListWithOptions. +func (p *HelpersImpl) KVListWithOptions(options ...KVListOption) ([]string, error) { + err := p.ensureServerVersion("5.6.0") + if err != nil { + return nil, err + } + // convert functional options into args struct + args := &kvListOptions{} + for _, opt := range options { + opt(args) + } + ret := make([]string, 0) + + // get our keys a batch at a time, filter out the ones we don't want based on our args + // any errors will hault the whole process and return the error raw + for i := 0; ; i++ { + keys, appErr := p.API.KVList(i, kvListPerPage) + if appErr != nil { + return nil, appErr + } + + if len(args.checkers) == 0 { + // no checkers, just append the whole block at once + ret = append(ret, keys...) + } else { + // we have a filter, so check each key, all checkers must say key + // for us to keep a key + for _, key := range keys { + keep, err := args.checkAll(key) + if err != nil { + return nil, err + } + if !keep { + continue + } + + // didn't get filtered out, add to our return + ret = append(ret, key) + } + } + + if len(keys) < kvListPerPage { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_plugin.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_plugin.go new file mode 100644 index 0000000..1bb9e47 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/helpers_plugin.go @@ -0,0 +1,81 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "net/http" + "net/url" + "path" + "time" + + "github.com/blang/semver" + "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v5/model" +) + +// InstallPluginFromURL implements Helpers.InstallPluginFromURL. 
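The prefix and checker options above compose; every checker must accept a key for it to be returned. A sketch that lists keys written under a hypothetical "todo_" prefix while skipping ones marked done:

package root

import (
    "strings"

    "github.com/mattermost/mattermost-server/v5/plugin"
)

// listOpenTodoKeys returns keys that start with "todo_" and do not carry a "_done" suffix.
func listOpenTodoKeys(helpers plugin.Helpers) ([]string, error) {
    return helpers.KVListWithOptions(
        plugin.WithPrefix("todo_"),
        plugin.WithChecker(func(key string) (bool, error) {
            return !strings.HasSuffix(key, "_done"), nil
        }),
    )
}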
+func (p *HelpersImpl) InstallPluginFromURL(downloadURL string, replace bool) (*model.Manifest, error) { + err := p.ensureServerVersion("5.18.0") + if err != nil { + return nil, err + } + + parsedURL, err := url.Parse(downloadURL) + if err != nil { + return nil, errors.Wrap(err, "error while parsing url") + } + + client := &http.Client{Timeout: time.Hour} + response, err := client.Get(parsedURL.String()) + if err != nil { + return nil, errors.Wrap(err, "unable to download the plugin") + } + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + return nil, errors.Errorf("received %d status code while downloading plugin from server", response.StatusCode) + } + + manifest, installError := p.API.InstallPlugin(response.Body, replace) + if installError != nil { + return nil, errors.Wrap(err, "unable to install plugin on server") + } + + return manifest, nil +} + +func (p *HelpersImpl) ensureServerVersion(required string) error { + serverVersion := p.API.GetServerVersion() + currentVersion := semver.MustParse(serverVersion) + requiredVersion := semver.MustParse(required) + + if currentVersion.LT(requiredVersion) { + return errors.Errorf("incompatible server version for plugin, minimum required version: %s, current version: %s", required, serverVersion) + } + return nil +} + +// GetPluginAssetURL implements GetPluginAssetURL. +func (p *HelpersImpl) GetPluginAssetURL(pluginID, asset string) (string, error) { + if len(pluginID) == 0 { + return "", errors.New("empty pluginID provided") + } + + if len(asset) == 0 { + return "", errors.New("empty asset name provided") + } + + siteURL := *p.API.GetConfig().ServiceSettings.SiteURL + if siteURL == "" { + return "", errors.New("no SiteURL configured by the server") + } + + u, err := url.Parse(siteURL + path.Join("/", pluginID, asset)) + if err != nil { + return "", err + } + + return u.String(), nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/hijack.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hijack.go new file mode 100644 index 0000000..ffe178a --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hijack.go @@ -0,0 +1,205 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
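GetPluginAssetURL and InstallPluginFromURL above are thin conveniences over the API. A sketch of both, with a placeholder plugin ID, asset path, and download URL:

package root

import (
    "github.com/mattermost/mattermost-server/v5/model"
    "github.com/mattermost/mattermost-server/v5/plugin"
)

// pluginIconURL resolves a bundled asset to an absolute URL; the plugin ID and asset path are placeholders.
func pluginIconURL(helpers plugin.Helpers) (string, error) {
    return helpers.GetPluginAssetURL("com.example.demo", "assets/icon.png")
}

// installCompanionPlugin downloads and installs another plugin; replace=true upgrades an existing copy.
func installCompanionPlugin(helpers plugin.Helpers) (*model.Manifest, error) {
    return helpers.InstallPluginFromURL("https://example.com/releases/companion-plugin.tar.gz", true)
}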
+ +package plugin + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/rpc" + "time" +) + +const ( + hijackedConnReadBufSize = 4096 +) + +var ( + ErrNotHijacked = errors.New("response is not hijacked") + ErrAlreadyHijacked = errors.New("response was already hijacked") + ErrCannotHijack = errors.New("response cannot be hijacked") +) + +func (w *httpResponseWriterRPCServer) HjConnRWRead(b []byte, reply *[]byte) error { + if w.hjr == nil { + return ErrNotHijacked + } + data := make([]byte, len(b)) + n, err := w.hjr.bufrw.Read(data) + if err != nil { + return err + } + *reply = data[:n] + return nil +} + +func (w *httpResponseWriterRPCServer) HjConnRWWrite(b []byte, reply *int) error { + if w.hjr == nil { + return ErrNotHijacked + } + n, err := w.hjr.bufrw.Write(b) + if err != nil { + return err + } + *reply = n + return nil +} + +func (w *httpResponseWriterRPCServer) HjConnRead(size int, reply *[]byte) error { + if w.hjr == nil { + return ErrNotHijacked + } + if len(w.hjr.readBuf) < size { + w.hjr.readBuf = make([]byte, size) + } + n, err := w.hjr.conn.Read(w.hjr.readBuf[:size]) + if err != nil { + return err + } + *reply = w.hjr.readBuf[:n] + return nil +} + +func (w *httpResponseWriterRPCServer) HjConnWrite(b []byte, reply *int) error { + if w.hjr == nil { + return ErrNotHijacked + } + n, err := w.hjr.conn.Write(b) + if err != nil { + return err + } + *reply = n + return nil +} + +func (w *httpResponseWriterRPCServer) HjConnClose(args struct{}, reply *struct{}) error { + if w.hjr == nil { + return ErrNotHijacked + } + return w.hjr.conn.Close() +} + +func (w *httpResponseWriterRPCServer) HjConnSetDeadline(t time.Time, reply *struct{}) error { + if w.hjr == nil { + return ErrNotHijacked + } + return w.hjr.conn.SetDeadline(t) +} + +func (w *httpResponseWriterRPCServer) HjConnSetReadDeadline(t time.Time, reply *struct{}) error { + if w.hjr == nil { + return ErrNotHijacked + } + return w.hjr.conn.SetReadDeadline(t) +} + +func (w *httpResponseWriterRPCServer) HjConnSetWriteDeadline(t time.Time, reply *struct{}) error { + if w.hjr == nil { + return ErrNotHijacked + } + return w.hjr.conn.SetWriteDeadline(t) +} + +func (w *httpResponseWriterRPCServer) HijackResponse(args struct{}, reply *struct{}) error { + if w.hjr != nil { + return ErrAlreadyHijacked + } + hj, ok := w.w.(http.Hijacker) + if !ok { + return ErrCannotHijack + } + conn, bufrw, err := hj.Hijack() + if err != nil { + return err + } + + w.hjr = &hijackedResponse{ + conn: conn, + bufrw: bufrw, + readBuf: make([]byte, hijackedConnReadBufSize), + } + return nil +} + +type hijackedConn struct { + client *rpc.Client +} + +type hijackedConnRW struct { + client *rpc.Client +} + +func (w *hijackedConnRW) Read(b []byte) (int, error) { + var data []byte + if err := w.client.Call("Plugin.HjConnRWRead", b, &data); err != nil { + return 0, err + } + copy(b, data) + return len(data), nil +} + +func (w *hijackedConnRW) Write(b []byte) (int, error) { + var n int + if err := w.client.Call("Plugin.HjConnRWWrite", b, &n); err != nil { + return 0, err + } + return n, nil +} + +func (w *hijackedConn) Read(b []byte) (int, error) { + var data []byte + if err := w.client.Call("Plugin.HjConnRead", len(b), &data); err != nil { + return 0, err + } + copy(b, data) + return len(data), nil +} + +func (w *hijackedConn) Write(b []byte) (int, error) { + var n int + if err := w.client.Call("Plugin.HjConnWrite", b, &n); err != nil { + return 0, err + } + return n, nil +} + +func (w *hijackedConn) Close() error { + return w.client.Call("Plugin.HjConnClose", 
struct{}{}, nil) +} + +func (w *hijackedConn) LocalAddr() net.Addr { + return nil +} + +func (w *hijackedConn) RemoteAddr() net.Addr { + return nil +} + +func (w *hijackedConn) SetDeadline(t time.Time) error { + return w.client.Call("Plugin.HjConnSetDeadline", t, nil) +} + +func (w *hijackedConn) SetReadDeadline(t time.Time) error { + return w.client.Call("Plugin.HjConnSetReadDeadline", t, nil) +} + +func (w *hijackedConn) SetWriteDeadline(t time.Time) error { + return w.client.Call("Plugin.HjConnSetWriteDeadline", t, nil) +} + +func (w *httpResponseWriterRPCClient) Hijack() (net.Conn, *bufio.ReadWriter, error) { + c := &hijackedConn{ + client: w.client, + } + rw := &hijackedConnRW{ + client: w.client, + } + + if err := w.client.Call("Plugin.HijackResponse", struct{}{}, nil); err != nil { + return nil, nil, err + } + + return c, bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)), nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/hooks.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks.go similarity index 77% rename from vendor/github.com/mattermost/mattermost-server/plugin/hooks.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks.go index 2b2d912..1e85a66 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/hooks.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package plugin @@ -7,7 +7,7 @@ import ( "io" "net/http" - "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/v5/model" ) // These assignments are part of the wire protocol used to trigger hook events in plugins. @@ -15,25 +15,27 @@ import ( // Feel free to add more, but do not change existing assignments. Follow the naming convention of // Id as the autogenerated glue code depends on that. const ( - OnActivateId = 0 - OnDeactivateId = 1 - ServeHTTPId = 2 - OnConfigurationChangeId = 3 - ExecuteCommandId = 4 - MessageWillBePostedId = 5 - MessageWillBeUpdatedId = 6 - MessageHasBeenPostedId = 7 - MessageHasBeenUpdatedId = 8 - UserHasJoinedChannelId = 9 - UserHasLeftChannelId = 10 - UserHasJoinedTeamId = 11 - UserHasLeftTeamId = 12 - ChannelHasBeenCreatedId = 13 - FileWillBeUploadedId = 14 - UserWillLogInId = 15 - UserHasLoggedInId = 16 - UserHasBeenCreatedId = 17 - TotalHooksId = iota + OnActivateId = 0 + OnDeactivateId = 1 + ServeHTTPId = 2 + OnConfigurationChangeId = 3 + ExecuteCommandId = 4 + MessageWillBePostedId = 5 + MessageWillBeUpdatedId = 6 + MessageHasBeenPostedId = 7 + MessageHasBeenUpdatedId = 8 + UserHasJoinedChannelId = 9 + UserHasLeftChannelId = 10 + UserHasJoinedTeamId = 11 + UserHasLeftTeamId = 12 + ChannelHasBeenCreatedId = 13 + FileWillBeUploadedId = 14 + UserWillLogInId = 15 + UserHasLoggedInId = 16 + UserHasBeenCreatedId = 17 + ReactionHasBeenAddedId = 18 + ReactionHasBeenRemovedId = 19 + TotalHooksId = iota ) const ( @@ -50,20 +52,28 @@ type Hooks interface { // OnActivate is invoked when the plugin is activated. If an error is returned, the plugin // will be terminated. The plugin will not receive hooks until after OnActivate returns // without error. OnConfigurationChange will be called once before OnActivate. + // + // Minimum server version: 5.2 OnActivate() error // Implemented returns a list of hooks that are implemented by the plugin. 
// Plugins do not need to provide an implementation. Any given will be ignored. + // + // Minimum server version: 5.2 Implemented() ([]string, error) // OnDeactivate is invoked when the plugin is deactivated. This is the plugin's last chance to // use the API, and the plugin will be terminated shortly after this invocation. The plugin // will stop receiving hooks just prior to this method being called. + // + // Minimum server version: 5.2 OnDeactivate() error // OnConfigurationChange is invoked when configuration changes may have been made. Any // returned error is logged, but does not stop the plugin. You must be prepared to handle // a configuration failure gracefully. It is called once before OnActivate. + // + // Minimum server version: 5.2 OnConfigurationChange() error // ServeHTTP allows the plugin to implement the http.Handler interface. Requests destined for @@ -71,10 +81,14 @@ type Hooks interface { // // The Mattermost-User-Id header will be present if (and only if) the request is by an // authenticated user. + // + // Minimum server version: 5.2 ServeHTTP(c *Context, w http.ResponseWriter, r *http.Request) // ExecuteCommand executes a command that has been previously registered via the RegisterCommand // API. + // + // Minimum server version: 5.2 ExecuteCommand(c *Context, args *model.CommandArgs) (*model.CommandResponse, *model.AppError) // UserHasBeenCreated is invoked after a user was created. @@ -84,9 +98,13 @@ type Hooks interface { // UserWillLogIn before the login of the user is returned. Returning a non empty string will reject the login event. // If you don't need to reject the login event, see UserHasLoggedIn + // + // Minimum server version: 5.2 UserWillLogIn(c *Context, user *model.User) string // UserHasLoggedIn is invoked after a user has logged in. + // + // Minimum server version: 5.2 UserHasLoggedIn(c *Context, user *model.User) // MessageWillBePosted is invoked when a message is posted by a user before it is committed @@ -101,6 +119,8 @@ type Hooks interface { // // Note that this method will be called for posts created by plugins, including the plugin that // created the post. + // + // Minimum server version: 5.2 MessageWillBePosted(c *Context, post *model.Post) (*model.Post, string) // MessageWillBeUpdated is invoked when a message is updated by a user before it is committed @@ -112,37 +132,53 @@ type Hooks interface { // // Note that this method will be called for posts updated by plugins, including the plugin that // updated the post. + // + // Minimum server version: 5.2 MessageWillBeUpdated(c *Context, newPost, oldPost *model.Post) (*model.Post, string) // MessageHasBeenPosted is invoked after the message has been committed to the database. // If you need to modify or reject the post, see MessageWillBePosted // Note that this method will be called for posts created by plugins, including the plugin that // created the post. + // + // Minimum server version: 5.2 MessageHasBeenPosted(c *Context, post *model.Post) // MessageHasBeenUpdated is invoked after a message is updated and has been updated in the database. // If you need to modify or reject the post, see MessageWillBeUpdated // Note that this method will be called for posts created by plugins, including the plugin that // created the post. + // + // Minimum server version: 5.2 MessageHasBeenUpdated(c *Context, newPost, oldPost *model.Post) // ChannelHasBeenCreated is invoked after the channel has been committed to the database. 
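As an illustration of the ExecuteCommand hook documented above, here is a sketch of a handler for a hypothetical /example slash command, assumed to be registered elsewhere via the RegisterCommand API:

package root

import (
    "strings"

    "github.com/mattermost/mattermost-server/v5/model"
    "github.com/mattermost/mattermost-server/v5/plugin"
)

// CommandPlugin is a hypothetical plugin that handles a /example slash command.
type CommandPlugin struct {
    plugin.MattermostPlugin
}

// ExecuteCommand answers /example with an ephemeral message and ignores everything else.
func (p *CommandPlugin) ExecuteCommand(c *plugin.Context, args *model.CommandArgs) (*model.CommandResponse, *model.AppError) {
    if !strings.HasPrefix(args.Command, "/example") {
        return &model.CommandResponse{}, nil
    }

    return &model.CommandResponse{
        ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL,
        Text:         "Hello from the example plugin!",
    }, nil
}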
+ // + // Minimum server version: 5.2 ChannelHasBeenCreated(c *Context, channel *model.Channel) // UserHasJoinedChannel is invoked after the membership has been committed to the database. // If actor is not nil, the user was invited to the channel by the actor. + // + // Minimum server version: 5.2 UserHasJoinedChannel(c *Context, channelMember *model.ChannelMember, actor *model.User) // UserHasLeftChannel is invoked after the membership has been removed from the database. // If actor is not nil, the user was removed from the channel by the actor. + // + // Minimum server version: 5.2 UserHasLeftChannel(c *Context, channelMember *model.ChannelMember, actor *model.User) // UserHasJoinedTeam is invoked after the membership has been committed to the database. // If actor is not nil, the user was added to the team by the actor. + // + // Minimum server version: 5.2 UserHasJoinedTeam(c *Context, teamMember *model.TeamMember, actor *model.User) // UserHasLeftTeam is invoked after the membership has been removed from the database. // If actor is not nil, the user was removed from the team by the actor. + // + // Minimum server version: 5.2 UserHasLeftTeam(c *Context, teamMember *model.TeamMember, actor *model.User) // FileWillBeUploaded is invoked when a file is uploaded, but before it is committed to backing store. @@ -154,5 +190,23 @@ type Hooks interface { // // Note that this method will be called for files uploaded by plugins, including the plugin that uploaded the post. // FileInfo.Size will be automatically set properly if you modify the file. + // + // Minimum server version: 5.2 FileWillBeUploaded(c *Context, info *model.FileInfo, file io.Reader, output io.Writer) (*model.FileInfo, string) + + // ReactionHasBeenAdded is invoked after the reaction has been committed to the database. + // + // Note that this method will be called for reactions added by plugins, including the plugin that + // added the reaction. + // + // Minimum server version: 5.30 + ReactionHasBeenAdded(c *Context, reaction *model.Reaction) + + // ReactionHasBeenRemoved is invoked after the removal of the reaction has been committed to the database. + // + // Note that this method will be called for reactions removed by plugins, including the plugin that + // removed the reaction. + // + // Minimum server version: 5.30 + ReactionHasBeenRemoved(c *Context, reaction *model.Reaction) } diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks_timer_layer_generated.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks_timer_layer_generated.go new file mode 100644 index 0000000..05a0a4b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/hooks_timer_layer_generated.go @@ -0,0 +1,164 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
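A sketch of implementing two more of the hooks documented above; the banned phrase and log fields are illustrative, and ReactionHasBeenAdded requires server 5.30 or later per the interface comments:

package root

import (
    "strings"

    "github.com/mattermost/mattermost-server/v5/model"
    "github.com/mattermost/mattermost-server/v5/plugin"
)

// HooksPlugin is a hypothetical plugin implementing message and reaction hooks.
type HooksPlugin struct {
    plugin.MattermostPlugin
}

// MessageWillBePosted rejects posts containing a banned phrase; a non-empty second return
// value rejects the post with that reason, while nil and "" leave the post untouched.
func (p *HooksPlugin) MessageWillBePosted(c *plugin.Context, post *model.Post) (*model.Post, string) {
    if strings.Contains(strings.ToLower(post.Message), "forbidden phrase") {
        return nil, "posts containing the forbidden phrase are not allowed"
    }
    return nil, ""
}

// ReactionHasBeenAdded logs every new reaction.
func (p *HooksPlugin) ReactionHasBeenAdded(c *plugin.Context, reaction *model.Reaction) {
    p.API.LogDebug("reaction added", "emoji", reaction.EmojiName, "post_id", reaction.PostId)
}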
+ +// Code generated by "make pluginapi" +// DO NOT EDIT + +package plugin + +import ( + "io" + "net/http" + timePkg "time" + + "github.com/mattermost/mattermost-server/v5/einterfaces" + "github.com/mattermost/mattermost-server/v5/model" +) + +type hooksTimerLayer struct { + pluginID string + hooksImpl Hooks + metrics einterfaces.MetricsInterface +} + +func (hooks *hooksTimerLayer) recordTime(startTime timePkg.Time, name string, success bool) { + if hooks.metrics != nil { + elapsedTime := float64(timePkg.Since(startTime)) / float64(timePkg.Second) + hooks.metrics.ObservePluginHookDuration(hooks.pluginID, name, success, elapsedTime) + } +} + +func (hooks *hooksTimerLayer) OnActivate() error { + startTime := timePkg.Now() + _returnsA := hooks.hooksImpl.OnActivate() + hooks.recordTime(startTime, "OnActivate", _returnsA == nil) + return _returnsA +} + +func (hooks *hooksTimerLayer) Implemented() ([]string, error) { + startTime := timePkg.Now() + _returnsA, _returnsB := hooks.hooksImpl.Implemented() + hooks.recordTime(startTime, "Implemented", _returnsB == nil) + return _returnsA, _returnsB +} + +func (hooks *hooksTimerLayer) OnDeactivate() error { + startTime := timePkg.Now() + _returnsA := hooks.hooksImpl.OnDeactivate() + hooks.recordTime(startTime, "OnDeactivate", _returnsA == nil) + return _returnsA +} + +func (hooks *hooksTimerLayer) OnConfigurationChange() error { + startTime := timePkg.Now() + _returnsA := hooks.hooksImpl.OnConfigurationChange() + hooks.recordTime(startTime, "OnConfigurationChange", _returnsA == nil) + return _returnsA +} + +func (hooks *hooksTimerLayer) ServeHTTP(c *Context, w http.ResponseWriter, r *http.Request) { + startTime := timePkg.Now() + hooks.hooksImpl.ServeHTTP(c, w, r) + hooks.recordTime(startTime, "ServeHTTP", true) +} + +func (hooks *hooksTimerLayer) ExecuteCommand(c *Context, args *model.CommandArgs) (*model.CommandResponse, *model.AppError) { + startTime := timePkg.Now() + _returnsA, _returnsB := hooks.hooksImpl.ExecuteCommand(c, args) + hooks.recordTime(startTime, "ExecuteCommand", _returnsB == nil) + return _returnsA, _returnsB +} + +func (hooks *hooksTimerLayer) UserHasBeenCreated(c *Context, user *model.User) { + startTime := timePkg.Now() + hooks.hooksImpl.UserHasBeenCreated(c, user) + hooks.recordTime(startTime, "UserHasBeenCreated", true) +} + +func (hooks *hooksTimerLayer) UserWillLogIn(c *Context, user *model.User) string { + startTime := timePkg.Now() + _returnsA := hooks.hooksImpl.UserWillLogIn(c, user) + hooks.recordTime(startTime, "UserWillLogIn", true) + return _returnsA +} + +func (hooks *hooksTimerLayer) UserHasLoggedIn(c *Context, user *model.User) { + startTime := timePkg.Now() + hooks.hooksImpl.UserHasLoggedIn(c, user) + hooks.recordTime(startTime, "UserHasLoggedIn", true) +} + +func (hooks *hooksTimerLayer) MessageWillBePosted(c *Context, post *model.Post) (*model.Post, string) { + startTime := timePkg.Now() + _returnsA, _returnsB := hooks.hooksImpl.MessageWillBePosted(c, post) + hooks.recordTime(startTime, "MessageWillBePosted", true) + return _returnsA, _returnsB +} + +func (hooks *hooksTimerLayer) MessageWillBeUpdated(c *Context, newPost, oldPost *model.Post) (*model.Post, string) { + startTime := timePkg.Now() + _returnsA, _returnsB := hooks.hooksImpl.MessageWillBeUpdated(c, newPost, oldPost) + hooks.recordTime(startTime, "MessageWillBeUpdated", true) + return _returnsA, _returnsB +} + +func (hooks *hooksTimerLayer) MessageHasBeenPosted(c *Context, post *model.Post) { + startTime := timePkg.Now() + 
hooks.hooksImpl.MessageHasBeenPosted(c, post) + hooks.recordTime(startTime, "MessageHasBeenPosted", true) +} + +func (hooks *hooksTimerLayer) MessageHasBeenUpdated(c *Context, newPost, oldPost *model.Post) { + startTime := timePkg.Now() + hooks.hooksImpl.MessageHasBeenUpdated(c, newPost, oldPost) + hooks.recordTime(startTime, "MessageHasBeenUpdated", true) +} + +func (hooks *hooksTimerLayer) ChannelHasBeenCreated(c *Context, channel *model.Channel) { + startTime := timePkg.Now() + hooks.hooksImpl.ChannelHasBeenCreated(c, channel) + hooks.recordTime(startTime, "ChannelHasBeenCreated", true) +} + +func (hooks *hooksTimerLayer) UserHasJoinedChannel(c *Context, channelMember *model.ChannelMember, actor *model.User) { + startTime := timePkg.Now() + hooks.hooksImpl.UserHasJoinedChannel(c, channelMember, actor) + hooks.recordTime(startTime, "UserHasJoinedChannel", true) +} + +func (hooks *hooksTimerLayer) UserHasLeftChannel(c *Context, channelMember *model.ChannelMember, actor *model.User) { + startTime := timePkg.Now() + hooks.hooksImpl.UserHasLeftChannel(c, channelMember, actor) + hooks.recordTime(startTime, "UserHasLeftChannel", true) +} + +func (hooks *hooksTimerLayer) UserHasJoinedTeam(c *Context, teamMember *model.TeamMember, actor *model.User) { + startTime := timePkg.Now() + hooks.hooksImpl.UserHasJoinedTeam(c, teamMember, actor) + hooks.recordTime(startTime, "UserHasJoinedTeam", true) +} + +func (hooks *hooksTimerLayer) UserHasLeftTeam(c *Context, teamMember *model.TeamMember, actor *model.User) { + startTime := timePkg.Now() + hooks.hooksImpl.UserHasLeftTeam(c, teamMember, actor) + hooks.recordTime(startTime, "UserHasLeftTeam", true) +} + +func (hooks *hooksTimerLayer) FileWillBeUploaded(c *Context, info *model.FileInfo, file io.Reader, output io.Writer) (*model.FileInfo, string) { + startTime := timePkg.Now() + _returnsA, _returnsB := hooks.hooksImpl.FileWillBeUploaded(c, info, file, output) + hooks.recordTime(startTime, "FileWillBeUploaded", true) + return _returnsA, _returnsB +} + +func (hooks *hooksTimerLayer) ReactionHasBeenAdded(c *Context, reaction *model.Reaction) { + startTime := timePkg.Now() + hooks.hooksImpl.ReactionHasBeenAdded(c, reaction) + hooks.recordTime(startTime, "ReactionHasBeenAdded", true) +} + +func (hooks *hooksTimerLayer) ReactionHasBeenRemoved(c *Context, reaction *model.Reaction) { + startTime := timePkg.Now() + hooks.hooksImpl.ReactionHasBeenRemoved(c, reaction) + hooks.recordTime(startTime, "ReactionHasBeenRemoved", true) +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/http.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/http.go similarity index 70% rename from vendor/github.com/mattermost/mattermost-server/plugin/http.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/http.go index 7d16503..b54f376 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/http.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/http.go @@ -4,13 +4,27 @@ package plugin import ( + "bufio" + "errors" + "fmt" "io" + "net" "net/http" "net/rpc" + + "github.com/mattermost/mattermost-server/v5/mlog" ) +type hijackedResponse struct { + conn net.Conn + bufrw *bufio.ReadWriter + readBuf []byte +} + type httpResponseWriterRPCServer struct { - w http.ResponseWriter + w http.ResponseWriter + log *mlog.Logger + hjr *hijackedResponse } func (w *httpResponseWriterRPCServer) Header(args struct{}, reply *http.Header) error { @@ -24,6 +38,12 @@ func (w *httpResponseWriterRPCServer) Write(args []byte, reply 
*struct{}) error } func (w *httpResponseWriterRPCServer) WriteHeader(args int, reply *struct{}) error { + // Check if args is a valid http status code. This prevents plugins from crashing the server with a panic. + // This is a copy of the checkWriteHeaderCode function in net/http/server.go in the go source. + if args < 100 || args > 999 { + w.log.Error(fmt.Sprintf("Plugin tried to write an invalid http status code: %v. Did not write the invalid header.", args)) + return errors.New("invalid http status code") + } w.w.WriteHeader(args) return nil } @@ -72,8 +92,8 @@ func (w *httpResponseWriterRPCClient) WriteHeader(statusCode int) { w.client.Call("Plugin.WriteHeader", statusCode, nil) } -func (h *httpResponseWriterRPCClient) Close() error { - return h.client.Close() +func (w *httpResponseWriterRPCClient) Close() error { + return w.client.Close() } func connectHTTPResponseWriter(conn io.ReadWriteCloser) *httpResponseWriterRPCClient { diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/io_rpc.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/io_rpc.go similarity index 100% rename from vendor/github.com/mattermost/mattermost-server/plugin/io_rpc.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/io_rpc.go diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/api.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/api.go similarity index 82% rename from vendor/github.com/mattermost/mattermost-server/plugin/plugintest/api.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/api.go index 768e763..f2aa7c3 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/api.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/api.go @@ -4,8 +4,14 @@ package plugintest -import mock "github.com/stretchr/testify/mock" -import model "github.com/mattermost/mattermost-server/model" +import ( + io "io" + http "net/http" + + mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost-server/v5/model" +) // API is an autogenerated mock type for the API type type API struct { @@ -62,6 +68,31 @@ func (_m *API) AddReaction(reaction *model.Reaction) (*model.Reaction, *model.Ap return r0, r1 } +// AddUserToChannel provides a mock function with given fields: channelId, userId, asUserId +func (_m *API) AddUserToChannel(channelId string, userId string, asUserId string) (*model.ChannelMember, *model.AppError) { + ret := _m.Called(channelId, userId, asUserId) + + var r0 *model.ChannelMember + if rf, ok := ret.Get(0).(func(string, string, string) *model.ChannelMember); ok { + r0 = rf(channelId, userId, asUserId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.ChannelMember) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string, string, string) *model.AppError); ok { + r1 = rf(channelId, userId, asUserId) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // CopyFileInfos provides a mock function with given fields: userId, fileIds func (_m *API) CopyFileInfos(userId string, fileIds []string) ([]string, *model.AppError) { ret := _m.Called(userId, fileIds) @@ -137,6 +168,29 @@ func (_m *API) CreateChannel(channel *model.Channel) (*model.Channel, *model.App return r0, r1 } +// CreateCommand provides a mock function with given fields: cmd +func (_m *API) CreateCommand(cmd *model.Command) (*model.Command, error) { + ret := _m.Called(cmd) + + var r0 *model.Command 
+ if rf, ok := ret.Get(0).(func(*model.Command) *model.Command); ok { + r0 = rf(cmd) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*model.Command) error); ok { + r1 = rf(cmd) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // CreatePost provides a mock function with given fields: post func (_m *API) CreatePost(post *model.Post) (*model.Post, *model.AppError) { ret := _m.Called(post) @@ -237,6 +291,31 @@ func (_m *API) CreateTeamMembers(teamId string, userIds []string, requestorId st return r0, r1 } +// CreateTeamMembersGracefully provides a mock function with given fields: teamId, userIds, requestorId +func (_m *API) CreateTeamMembersGracefully(teamId string, userIds []string, requestorId string) ([]*model.TeamMemberWithError, *model.AppError) { + ret := _m.Called(teamId, userIds, requestorId) + + var r0 []*model.TeamMemberWithError + if rf, ok := ret.Get(0).(func(string, []string, string) []*model.TeamMemberWithError); ok { + r0 = rf(teamId, userIds, requestorId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.TeamMemberWithError) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string, []string, string) *model.AppError); ok { + r1 = rf(teamId, userIds, requestorId) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // CreateUser provides a mock function with given fields: user func (_m *API) CreateUser(user *model.User) (*model.User, *model.AppError) { ret := _m.Called(user) @@ -310,6 +389,20 @@ func (_m *API) DeleteChannelMember(channelId string, userId string) *model.AppEr return r0 } +// DeleteCommand provides a mock function with given fields: commandID +func (_m *API) DeleteCommand(commandID string) error { + ret := _m.Called(commandID) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(commandID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // DeleteEphemeralPost provides a mock function with given fields: userId, postId func (_m *API) DeleteEphemeralPost(userId string, postId string) { _m.Called(userId, postId) @@ -331,6 +424,22 @@ func (_m *API) DeletePost(postId string) *model.AppError { return r0 } +// DeletePreferencesForUser provides a mock function with given fields: userId, preferences +func (_m *API) DeletePreferencesForUser(userId string, preferences []model.Preference) *model.AppError { + ret := _m.Called(userId, preferences) + + var r0 *model.AppError + if rf, ok := ret.Get(0).(func(string, []model.Preference) *model.AppError); ok { + r0 = rf(userId, preferences) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.AppError) + } + } + + return r0 +} + // DeleteTeam provides a mock function with given fields: teamId func (_m *API) DeleteTeam(teamId string) *model.AppError { ret := _m.Called(teamId) @@ -411,6 +520,29 @@ func (_m *API) EnablePlugin(id string) *model.AppError { return r0 } +// ExecuteSlashCommand provides a mock function with given fields: commandArgs +func (_m *API) ExecuteSlashCommand(commandArgs *model.CommandArgs) (*model.CommandResponse, error) { + ret := _m.Called(commandArgs) + + var r0 *model.CommandResponse + if rf, ok := ret.Get(0).(func(*model.CommandArgs) *model.CommandResponse); ok { + r0 = rf(commandArgs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.CommandResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*model.CommandArgs) error); ok { + r1 = rf(commandArgs) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + // GetBot provides a mock function with given fields: botUserId, includeDeleted func (_m *API) GetBot(botUserId string, includeDeleted bool) (*model.Bot, *model.AppError) { ret := _m.Called(botUserId, includeDeleted) @@ -732,6 +864,29 @@ func (_m *API) GetChannelsForTeamForUser(teamId string, userId string, includeDe return r0, r1 } +// GetCommand provides a mock function with given fields: commandID +func (_m *API) GetCommand(commandID string) (*model.Command, error) { + ret := _m.Called(commandID) + + var r0 *model.Command + if rf, ok := ret.Get(0).(func(string) *model.Command); ok { + r0 = rf(commandID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(commandID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetConfig provides a mock function with given fields: func (_m *API) GetConfig() *model.Config { ret := _m.Called() @@ -944,6 +1099,31 @@ func (_m *API) GetFileInfo(fileId string) (*model.FileInfo, *model.AppError) { return r0, r1 } +// GetFileInfos provides a mock function with given fields: page, perPage, opt +func (_m *API) GetFileInfos(page int, perPage int, opt *model.GetFileInfosOptions) ([]*model.FileInfo, *model.AppError) { + ret := _m.Called(page, perPage, opt) + + var r0 []*model.FileInfo + if rf, ok := ret.Get(0).(func(int, int, *model.GetFileInfosOptions) []*model.FileInfo); ok { + r0 = rf(page, perPage, opt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.FileInfo) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(int, int, *model.GetFileInfosOptions) *model.AppError); ok { + r1 = rf(page, perPage, opt) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // GetFileLink provides a mock function with given fields: fileId func (_m *API) GetFileLink(fileId string) (string, *model.AppError) { ret := _m.Called(fileId) @@ -967,6 +1147,56 @@ func (_m *API) GetFileLink(fileId string) (string, *model.AppError) { return r0, r1 } +// GetGroup provides a mock function with given fields: groupId +func (_m *API) GetGroup(groupId string) (*model.Group, *model.AppError) { + ret := _m.Called(groupId) + + var r0 *model.Group + if rf, ok := ret.Get(0).(func(string) *model.Group); ok { + r0 = rf(groupId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Group) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string) *model.AppError); ok { + r1 = rf(groupId) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + +// GetGroupByName provides a mock function with given fields: name +func (_m *API) GetGroupByName(name string) (*model.Group, *model.AppError) { + ret := _m.Called(name) + + var r0 *model.Group + if rf, ok := ret.Get(0).(func(string) *model.Group); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Group) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string) *model.AppError); ok { + r1 = rf(name) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // GetGroupChannel provides a mock function with given fields: userIds func (_m *API) GetGroupChannel(userIds []string) (*model.Channel, *model.AppError) { ret := _m.Called(userIds) @@ -992,6 +1222,31 @@ func (_m *API) GetGroupChannel(userIds []string) (*model.Channel, *model.AppErro return r0, r1 } +// 
GetGroupsForUser provides a mock function with given fields: userId +func (_m *API) GetGroupsForUser(userId string) ([]*model.Group, *model.AppError) { + ret := _m.Called(userId) + + var r0 []*model.Group + if rf, ok := ret.Get(0).(func(string) []*model.Group); ok { + r0 = rf(userId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.Group) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string) *model.AppError); ok { + r1 = rf(userId) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // GetLDAPUserAttributes provides a mock function with given fields: userId, attributes func (_m *API) GetLDAPUserAttributes(userId string, attributes []string) (map[string]string, *model.AppError) { ret := _m.Called(userId, attributes) @@ -1249,6 +1504,31 @@ func (_m *API) GetPostsSince(channelId string, time int64) (*model.PostList, *mo return r0, r1 } +// GetPreferencesForUser provides a mock function with given fields: userId +func (_m *API) GetPreferencesForUser(userId string) ([]model.Preference, *model.AppError) { + ret := _m.Called(userId) + + var r0 []model.Preference + if rf, ok := ret.Get(0).(func(string) []model.Preference); ok { + r0 = rf(userId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]model.Preference) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string) *model.AppError); ok { + r1 = rf(userId) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // GetProfileImage provides a mock function with given fields: userId func (_m *API) GetProfileImage(userId string) ([]byte, *model.AppError) { ret := _m.Called(userId) @@ -1636,6 +1916,36 @@ func (_m *API) GetTeamsUnreadForUser(userId string) ([]*model.TeamUnread, *model return r0, r1 } +// GetTelemetryId provides a mock function with given fields: +func (_m *API) GetTelemetryId() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// GetUnsanitizedConfig provides a mock function with given fields: +func (_m *API) GetUnsanitizedConfig() *model.Config { + ret := _m.Called() + + var r0 *model.Config + if rf, ok := ret.Get(0).(func() *model.Config); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Config) + } + } + + return r0 +} + // GetUser provides a mock function with given fields: userId func (_m *API) GetUser(userId string) (*model.User, *model.AppError) { ret := _m.Called(userId) @@ -1903,6 +2213,54 @@ func (_m *API) HasPermissionToTeam(userId string, teamId string, permission *mod return r0 } +// InstallPlugin provides a mock function with given fields: file, replace +func (_m *API) InstallPlugin(file io.Reader, replace bool) (*model.Manifest, *model.AppError) { + ret := _m.Called(file, replace) + + var r0 *model.Manifest + if rf, ok := ret.Get(0).(func(io.Reader, bool) *model.Manifest); ok { + r0 = rf(file, replace) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Manifest) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(io.Reader, bool) *model.AppError); ok { + r1 = rf(file, replace) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + +// KVCompareAndDelete provides a mock function with given fields: key, oldValue +func (_m *API) KVCompareAndDelete(key string, oldValue []byte) (bool, *model.AppError) { + ret := _m.Called(key, oldValue) + + var r0 
bool + if rf, ok := ret.Get(0).(func(string, []byte) bool); ok { + r0 = rf(key, oldValue) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string, []byte) *model.AppError); ok { + r1 = rf(key, oldValue) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // KVCompareAndSet provides a mock function with given fields: key, oldValue, newValue func (_m *API) KVCompareAndSet(key string, oldValue []byte, newValue []byte) (bool, *model.AppError) { ret := _m.Called(key, oldValue, newValue) @@ -2040,6 +2398,121 @@ func (_m *API) KVSetWithExpiry(key string, value []byte, expireInSeconds int64) return r0 } +// KVSetWithOptions provides a mock function with given fields: key, value, options +func (_m *API) KVSetWithOptions(key string, value []byte, options model.PluginKVSetOptions) (bool, *model.AppError) { + ret := _m.Called(key, value, options) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, []byte, model.PluginKVSetOptions) bool); ok { + r0 = rf(key, value, options) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string, []byte, model.PluginKVSetOptions) *model.AppError); ok { + r1 = rf(key, value, options) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + +// ListBuiltInCommands provides a mock function with given fields: +func (_m *API) ListBuiltInCommands() ([]*model.Command, error) { + ret := _m.Called() + + var r0 []*model.Command + if rf, ok := ret.Get(0).(func() []*model.Command); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListCommands provides a mock function with given fields: teamID +func (_m *API) ListCommands(teamID string) ([]*model.Command, error) { + ret := _m.Called(teamID) + + var r0 []*model.Command + if rf, ok := ret.Get(0).(func(string) []*model.Command); ok { + r0 = rf(teamID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(teamID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListCustomCommands provides a mock function with given fields: teamID +func (_m *API) ListCustomCommands(teamID string) ([]*model.Command, error) { + ret := _m.Called(teamID) + + var r0 []*model.Command + if rf, ok := ret.Get(0).(func(string) []*model.Command); ok { + r0 = rf(teamID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(teamID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListPluginCommands provides a mock function with given fields: teamID +func (_m *API) ListPluginCommands(teamID string) ([]*model.Command, error) { + ret := _m.Called(teamID) + + var r0 []*model.Command + if rf, ok := ret.Get(0).(func(string) []*model.Command); ok { + r0 = rf(teamID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(teamID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // LoadPluginConfiguration provides a mock function with given fields: dest func (_m *API) LoadPluginConfiguration(dest interface{}) error { ret := _m.Called(dest) @@ 
-2143,6 +2616,38 @@ func (_m *API) PermanentDeleteBot(botUserId string) *model.AppError { return r0 } +// PluginHTTP provides a mock function with given fields: request +func (_m *API) PluginHTTP(request *http.Request) *http.Response { + ret := _m.Called(request) + + var r0 *http.Response + if rf, ok := ret.Get(0).(func(*http.Request) *http.Response); ok { + r0 = rf(request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*http.Response) + } + } + + return r0 +} + +// PublishUserTyping provides a mock function with given fields: userId, channelId, parentId +func (_m *API) PublishUserTyping(userId string, channelId string, parentId string) *model.AppError { + ret := _m.Called(userId, channelId, parentId) + + var r0 *model.AppError + if rf, ok := ret.Get(0).(func(string, string, string) *model.AppError); ok { + r0 = rf(userId, channelId, parentId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.AppError) + } + } + + return r0 +} + // PublishWebSocketEvent provides a mock function with given fields: event, payload, broadcast func (_m *API) PublishWebSocketEvent(event string, payload map[string]interface{}, broadcast *model.WebsocketBroadcast) { _m.Called(event, payload, broadcast) @@ -2317,6 +2822,31 @@ func (_m *API) SearchPostsInTeam(teamId string, paramsList []*model.SearchParams return r0, r1 } +// SearchPostsInTeamForUser provides a mock function with given fields: teamId, userId, searchParams +func (_m *API) SearchPostsInTeamForUser(teamId string, userId string, searchParams model.SearchParameter) (*model.PostSearchResults, *model.AppError) { + ret := _m.Called(teamId, userId, searchParams) + + var r0 *model.PostSearchResults + if rf, ok := ret.Get(0).(func(string, string, model.SearchParameter) *model.PostSearchResults); ok { + r0 = rf(teamId, userId, searchParams) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.PostSearchResults) + } + } + + var r1 *model.AppError + if rf, ok := ret.Get(1).(func(string, string, model.SearchParameter) *model.AppError); ok { + r1 = rf(teamId, userId, searchParams) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + // SearchTeams provides a mock function with given fields: term func (_m *API) SearchTeams(term string) ([]*model.Team, *model.AppError) { ret := _m.Called(term) @@ -2561,6 +3091,29 @@ func (_m *API) UpdateChannelMemberRoles(channelId string, userId string, newRole return r0, r1 } +// UpdateCommand provides a mock function with given fields: commandID, updatedCmd +func (_m *API) UpdateCommand(commandID string, updatedCmd *model.Command) (*model.Command, error) { + ret := _m.Called(commandID, updatedCmd) + + var r0 *model.Command + if rf, ok := ret.Get(0).(func(string, *model.Command) *model.Command); ok { + r0 = rf(commandID, updatedCmd) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Command) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *model.Command) error); ok { + r1 = rf(commandID, updatedCmd) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // UpdateEphemeralPost provides a mock function with given fields: userId, post func (_m *API) UpdateEphemeralPost(userId string, post *model.Post) *model.Post { ret := _m.Called(userId, post) @@ -2602,6 +3155,22 @@ func (_m *API) UpdatePost(post *model.Post) (*model.Post, *model.AppError) { return r0, r1 } +// UpdatePreferencesForUser provides a mock function with given fields: userId, preferences +func (_m *API) UpdatePreferencesForUser(userId string, preferences 
[]model.Preference) *model.AppError { + ret := _m.Called(userId, preferences) + + var r0 *model.AppError + if rf, ok := ret.Get(0).(func(string, []model.Preference) *model.AppError); ok { + r0 = rf(userId, preferences) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.AppError) + } + } + + return r0 +} + // UpdateTeam provides a mock function with given fields: team func (_m *API) UpdateTeam(team *model.Team) (*model.Team, *model.AppError) { ret := _m.Called(team) diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/doc.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/doc.go similarity index 60% rename from vendor/github.com/mattermost/mattermost-server/plugin/plugintest/doc.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/doc.go index 69a51bd..ed55748 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/doc.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/doc.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. // The plugintest package provides mocks that can be used to test plugins. // @@ -7,5 +7,5 @@ // https://godoc.org/github.com/stretchr/testify/mock // // If you need to import the mock package, you can import it with -// "github.com/mattermost/mattermost-server/plugin/plugintest/mock". +// "github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock". package plugintest diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/helpers.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/helpers.go new file mode 100644 index 0000000..48baa60 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/helpers.go @@ -0,0 +1,257 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +// Regenerate this file using `make plugin-mocks`. + +package plugintest + +import ( + model "github.com/mattermost/mattermost-server/v5/model" + plugin "github.com/mattermost/mattermost-server/v5/plugin" + mock "github.com/stretchr/testify/mock" +) + +// Helpers is an autogenerated mock type for the Helpers type +type Helpers struct { + mock.Mock +} + +// CheckRequiredServerConfiguration provides a mock function with given fields: req +func (_m *Helpers) CheckRequiredServerConfiguration(req *model.Config) (bool, error) { + ret := _m.Called(req) + + var r0 bool + if rf, ok := ret.Get(0).(func(*model.Config) bool); ok { + r0 = rf(req) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*model.Config) error); ok { + r1 = rf(req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureBot provides a mock function with given fields: bot, options +func (_m *Helpers) EnsureBot(bot *model.Bot, options ...plugin.EnsureBotOption) (string, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, bot) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 string + if rf, ok := ret.Get(0).(func(*model.Bot, ...plugin.EnsureBotOption) string); ok { + r0 = rf(bot, options...) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*model.Bot, ...plugin.EnsureBotOption) error); ok { + r1 = rf(bot, options...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPluginAssetURL provides a mock function with given fields: pluginID, asset +func (_m *Helpers) GetPluginAssetURL(pluginID string, asset string) (string, error) { + ret := _m.Called(pluginID, asset) + + var r0 string + if rf, ok := ret.Get(0).(func(string, string) string); ok { + r0 = rf(pluginID, asset) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(pluginID, asset) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InstallPluginFromURL provides a mock function with given fields: downloadURL, replace +func (_m *Helpers) InstallPluginFromURL(downloadURL string, replace bool) (*model.Manifest, error) { + ret := _m.Called(downloadURL, replace) + + var r0 *model.Manifest + if rf, ok := ret.Get(0).(func(string, bool) *model.Manifest); ok { + r0 = rf(downloadURL, replace) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Manifest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, bool) error); ok { + r1 = rf(downloadURL, replace) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// KVCompareAndDeleteJSON provides a mock function with given fields: key, oldValue +func (_m *Helpers) KVCompareAndDeleteJSON(key string, oldValue interface{}) (bool, error) { + ret := _m.Called(key, oldValue) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, interface{}) bool); ok { + r0 = rf(key, oldValue) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, interface{}) error); ok { + r1 = rf(key, oldValue) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// KVCompareAndSetJSON provides a mock function with given fields: key, oldValue, newValue +func (_m *Helpers) KVCompareAndSetJSON(key string, oldValue interface{}, newValue interface{}) (bool, error) { + ret := _m.Called(key, oldValue, newValue) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, interface{}, interface{}) bool); ok { + r0 = rf(key, oldValue, newValue) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, interface{}, interface{}) error); ok { + r1 = rf(key, oldValue, newValue) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// KVGetJSON provides a mock function with given fields: key, value +func (_m *Helpers) KVGetJSON(key string, value interface{}) (bool, error) { + ret := _m.Called(key, value) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, interface{}) bool); ok { + r0 = rf(key, value) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, interface{}) error); ok { + r1 = rf(key, value) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// KVListWithOptions provides a mock function with given fields: options +func (_m *Helpers) KVListWithOptions(options ...plugin.KVListOption) ([]string, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 []string + if rf, ok := ret.Get(0).(func(...plugin.KVListOption) []string); ok { + r0 = rf(options...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(...plugin.KVListOption) error); ok { + r1 = rf(options...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// KVSetJSON provides a mock function with given fields: key, value +func (_m *Helpers) KVSetJSON(key string, value interface{}) error { + ret := _m.Called(key, value) + + var r0 error + if rf, ok := ret.Get(0).(func(string, interface{}) error); ok { + r0 = rf(key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// KVSetWithExpiryJSON provides a mock function with given fields: key, value, expireInSeconds +func (_m *Helpers) KVSetWithExpiryJSON(key string, value interface{}, expireInSeconds int64) error { + ret := _m.Called(key, value, expireInSeconds) + + var r0 error + if rf, ok := ret.Get(0).(func(string, interface{}, int64) error); ok { + r0 = rf(key, value, expireInSeconds) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ShouldProcessMessage provides a mock function with given fields: post, options +func (_m *Helpers) ShouldProcessMessage(post *model.Post, options ...plugin.ShouldProcessMessageOption) (bool, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, post) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 bool + if rf, ok := ret.Get(0).(func(*model.Post, ...plugin.ShouldProcessMessageOption) bool); ok { + r0 = rf(post, options...) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*model.Post, ...plugin.ShouldProcessMessageOption) error); ok { + r1 = rf(post, options...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/hooks.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/hooks.go similarity index 91% rename from vendor/github.com/mattermost/mattermost-server/plugin/plugintest/hooks.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/hooks.go index 5ebeecc..56842ad 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/hooks.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/hooks.go @@ -4,11 +4,16 @@ package plugintest -import http "net/http" -import io "io" -import mock "github.com/stretchr/testify/mock" -import model "github.com/mattermost/mattermost-server/model" -import plugin "github.com/mattermost/mattermost-server/plugin" +import ( + io "io" + http "net/http" + + mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost-server/v5/model" + + plugin "github.com/mattermost/mattermost-server/v5/plugin" +) // Hooks is an autogenerated mock type for the Hooks type type Hooks struct { @@ -189,6 +194,16 @@ func (_m *Hooks) OnDeactivate() error { return r0 } +// ReactionHasBeenAdded provides a mock function with given fields: c, reaction +func (_m *Hooks) ReactionHasBeenAdded(c *plugin.Context, reaction *model.Reaction) { + _m.Called(c, reaction) +} + +// ReactionHasBeenRemoved provides a mock function with given fields: c, reaction +func (_m *Hooks) ReactionHasBeenRemoved(c *plugin.Context, reaction *model.Reaction) { + _m.Called(c, reaction) +} + // ServeHTTP provides a mock function with given fields: c, w, r func (_m *Hooks) ServeHTTP(c *plugin.Context, w http.ResponseWriter, r *http.Request) { _m.Called(c, w, r) diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/mock/mock.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock/mock.go similarity index 88% rename from 
vendor/github.com/mattermost/mattermost-server/plugin/plugintest/mock/mock.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock/mock.go index 6d593a4..397ba42 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/plugintest/mock/mock.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/plugintest/mock/mock.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. // This package provides aliases for the contents of "github.com/stretchr/testify/mock". Because // external packages can't import our vendored dependencies, this is necessary for them to be able diff --git a/vendor/github.com/mattermost/mattermost-server/v5/plugin/stringifier.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/stringifier.go new file mode 100644 index 0000000..6f4fcfb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/stringifier.go @@ -0,0 +1,31 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "fmt" +) + +func stringify(objects []interface{}) []string { + stringified := make([]string, len(objects)) + for i, object := range objects { + stringified[i] = fmt.Sprintf("%+v", object) + } + return stringified +} + +func toObjects(strings []string) []interface{} { + if strings == nil { + return nil + } + objects := make([]interface{}, len(strings)) + for i, string := range strings { + objects[i] = string + } + return objects +} + +func stringifyToObjects(objects []interface{}) []interface{} { + return toObjects(stringify(objects)) +} diff --git a/vendor/github.com/mattermost/mattermost-server/plugin/supervisor.go b/vendor/github.com/mattermost/mattermost-server/v5/plugin/supervisor.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/plugin/supervisor.go rename to vendor/github.com/mattermost/mattermost-server/v5/plugin/supervisor.go index d07ece8..2ee4141 100644 --- a/vendor/github.com/mattermost/mattermost-server/plugin/supervisor.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/plugin/supervisor.go @@ -9,21 +9,25 @@ import ( "path/filepath" "runtime" "strings" + "sync" "time" plugin "github.com/hashicorp/go-plugin" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" + + "github.com/mattermost/mattermost-server/v5/einterfaces" + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" ) type supervisor struct { + lock sync.RWMutex client *plugin.Client hooks Hooks implemented [TotalHooksId]bool pid int } -func newSupervisor(pluginInfo *model.BundleInfo, parentLogger *mlog.Logger, apiImpl API) (retSupervisor *supervisor, retErr error) { +func newSupervisor(pluginInfo *model.BundleInfo, apiImpl API, parentLogger *mlog.Logger, metrics einterfaces.MetricsInterface) (retSupervisor *supervisor, retErr error) { sup := supervisor{} defer func() { if retErr != nil { @@ -41,7 +45,7 @@ func newSupervisor(pluginInfo *model.BundleInfo, parentLogger *mlog.Logger, apiI pluginMap := map[string]plugin.Plugin{ "hooks": &hooksPlugin{ log: wrappedLogger, - apiImpl: apiImpl, + apiImpl: &apiTimerLayer{pluginInfo.Manifest.Id, apiImpl, metrics}, }, } @@ -78,7 +82,7 @@ func newSupervisor(pluginInfo *model.BundleInfo, parentLogger 
*mlog.Logger, apiI return nil, err } - sup.hooks = raw.(Hooks) + sup.hooks = &hooksTimerLayer{pluginInfo.Manifest.Id, raw.(Hooks), metrics} impl, err := sup.hooks.Implemented() if err != nil { @@ -90,35 +94,35 @@ func newSupervisor(pluginInfo *model.BundleInfo, parentLogger *mlog.Logger, apiI } } - err = sup.Hooks().OnActivate() - if err != nil { - return nil, err - } - return &sup, nil } func (sup *supervisor) Shutdown() { + sup.lock.RLock() + defer sup.lock.RUnlock() if sup.client != nil { sup.client.Kill() } } func (sup *supervisor) Hooks() Hooks { + sup.lock.RLock() + defer sup.lock.RUnlock() return sup.hooks } // PerformHealthCheck checks the plugin through an an RPC ping. func (sup *supervisor) PerformHealthCheck() error { + // No need for a lock here because Ping is read-locked. if pingErr := sup.Ping(); pingErr != nil { - for pingFails := 1; pingFails < HEALTH_CHECK_PING_FAIL_LIMIT; pingFails++ { + for pingFails := 1; pingFails < HealthCheckPingFailLimit; pingFails++ { pingErr = sup.Ping() if pingErr == nil { break } } if pingErr != nil { - mlog.Debug(fmt.Sprintf("Error pinging plugin, error: %s", pingErr.Error())) + mlog.Debug("Error pinging plugin", mlog.Err(pingErr)) return fmt.Errorf("Plugin RPC connection is not responding") } } @@ -128,8 +132,9 @@ func (sup *supervisor) PerformHealthCheck() error { // Ping checks that the RPC connection with the plugin is alive and healthy. func (sup *supervisor) Ping() error { + sup.lock.RLock() + defer sup.lock.RUnlock() client, err := sup.client.Client() - if err != nil { return err } @@ -138,5 +143,7 @@ func (sup *supervisor) Ping() error { } func (sup *supervisor) Implements(hookId int) bool { + sup.lock.RLock() + defer sup.lock.RUnlock() return sup.implemented[hookId] } diff --git a/vendor/github.com/mattermost/mattermost-server/services/timezones/default.go b/vendor/github.com/mattermost/mattermost-server/v5/services/timezones/default.go similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/services/timezones/default.go rename to vendor/github.com/mattermost/mattermost-server/v5/services/timezones/default.go index 065d0ee..3d835f7 100644 --- a/vendor/github.com/mattermost/mattermost-server/services/timezones/default.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/services/timezones/default.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package timezones @@ -378,7 +378,6 @@ var DefaultSupportedTimezones = []string{ "CST6CDT", "Canada/Atlantic", "Canada/Central", - "Canada/East-Saskatchewan", "Canada/Eastern", "Canada/Mountain", "Canada/Newfoundland", @@ -491,7 +490,6 @@ var DefaultSupportedTimezones = []string{ "Europe/Zagreb", "Europe/Zaporozhye", "Europe/Zurich", - "Factory", "GB", "GB-Eire", "GMT", diff --git a/vendor/github.com/mattermost/mattermost-server/services/timezones/timezones.go b/vendor/github.com/mattermost/mattermost-server/v5/services/timezones/timezones.go similarity index 93% rename from vendor/github.com/mattermost/mattermost-server/services/timezones/timezones.go rename to vendor/github.com/mattermost/mattermost-server/v5/services/timezones/timezones.go index ab955bd..e211a4c 100644 --- a/vendor/github.com/mattermost/mattermost-server/services/timezones/timezones.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/services/timezones/timezones.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
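For context on the regenerated plugintest mocks above: a plugin's own tests drive them through testify's mock API. A minimal sketch, assuming a `package main` plugin and illustrative key/value data that are not part of this patch:

package main

import (
	"testing"

	"github.com/mattermost/mattermost-server/v5/model"
	"github.com/mattermost/mattermost-server/v5/plugin/plugintest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestKVSetWithOptionsMock(t *testing.T) {
	api := &plugintest.API{}
	// Program the generated mock: this key, with any value and options, reports success.
	api.On("KVSetWithOptions", "counter", mock.Anything, mock.Anything).Return(true, nil)

	ok, appErr := api.KVSetWithOptions("counter", []byte("1"), model.PluginKVSetOptions{})
	require.Nil(t, appErr)
	require.True(t, ok)
	api.AssertExpectations(t)
}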
-// See License.txt for license information. +// See LICENSE.txt for license information. package timezones diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/api.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/api.go new file mode 100644 index 0000000..ce04537 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/api.go @@ -0,0 +1,141 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package utils + +import ( + "crypto" + "crypto/rand" + "encoding/base64" + "fmt" + "html/template" + "net/http" + "net/url" + "path" + "strings" + + "github.com/mattermost/mattermost-server/v5/model" +) + +func CheckOrigin(r *http.Request, allowedOrigins string) bool { + origin := r.Header.Get("Origin") + if origin == "" { + return true + } + + if allowedOrigins == "*" { + return true + } + for _, allowed := range strings.Split(allowedOrigins, " ") { + if allowed == origin { + return true + } + } + return false +} + +func OriginChecker(allowedOrigins string) func(*http.Request) bool { + return func(r *http.Request) bool { + return CheckOrigin(r, allowedOrigins) + } +} + +func RenderWebAppError(config *model.Config, w http.ResponseWriter, r *http.Request, err *model.AppError, s crypto.Signer) { + RenderWebError(config, w, r, err.StatusCode, url.Values{ + "message": []string{err.Message}, + }, s) +} + +func RenderWebError(config *model.Config, w http.ResponseWriter, r *http.Request, status int, params url.Values, s crypto.Signer) { + queryString := params.Encode() + + subpath, _ := GetSubpathFromConfig(config) + + h := crypto.SHA256 + sum := h.New() + sum.Write([]byte(path.Join(subpath, "error") + "?" + queryString)) + signature, err := s.Sign(rand.Reader, sum.Sum(nil), h) + if err != nil { + http.Error(w, "", http.StatusInternalServerError) + return + } + destination := path.Join(subpath, "error") + "?" + queryString + "&s=" + base64.URLEncoding.EncodeToString(signature) + + if status >= 300 && status < 400 { + http.Redirect(w, r, destination, status) + return + } + + w.Header().Set("Content-Type", "text/html") + w.WriteHeader(status) + fmt.Fprintln(w, ``) + fmt.Fprintln(w, ``) + fmt.Fprintln(w, ``) + fmt.Fprintln(w, `...`) + fmt.Fprintln(w, ``) +} + +func RenderMobileAuthComplete(w http.ResponseWriter, redirectUrl string) { + RenderMobileMessage(w, ` +
[static HTML markup inside these mobile-message template strings was lost in extraction; only the recoverable Go code and dynamic segments are kept below]
+		`+T("api.oauth.auth_complete")+`
+		`+T("api.oauth.redirecting_back")+`
+	`)
+}
+
+func RenderMobileError(config *model.Config, w http.ResponseWriter, err *model.AppError, redirectURL string) {
+	RenderMobileMessage(w, `
+		`+T("error")+`
+		`+err.Message+`
+		`+T("api.back_to_app", map[string]interface{}{"SiteName": config.TeamSettings.SiteName})+`
+	`)
+}
+
+func RenderMobileMessage(w http.ResponseWriter, message string) {
+	w.Header().Set("Content-Type", "text/html")
+	fmt.Fprintln(w, `
+		`+message+`
    + + + `) +} diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/archive.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/archive.go new file mode 100644 index 0000000..cf8d740 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/archive.go @@ -0,0 +1,65 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package utils + +import ( + "archive/zip" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +func sanitizePath(p string) string { + dir := strings.ReplaceAll(filepath.Dir(filepath.Clean(p)), "..", "") + base := filepath.Base(p) + if strings.Count(base, ".") == len(base) { + return "" + } + return filepath.Join(dir, base) +} + +// UnzipToPath extracts a given zip archive into a given path. +// It returns a list of extracted paths. +func UnzipToPath(zipFile io.ReaderAt, size int64, outPath string) ([]string, error) { + rd, err := zip.NewReader(zipFile, size) + if err != nil { + return nil, fmt.Errorf("failed to create reader: %w", err) + } + + paths := make([]string, len(rd.File)) + for i, f := range rd.File { + filePath := sanitizePath(f.Name) + if filePath == "" { + return nil, fmt.Errorf("invalid filepath `%s`", f.Name) + } + path := filepath.Join(outPath, filePath) + paths[i] = path + if f.FileInfo().IsDir() { + if err := os.Mkdir(path, 0744); err != nil { + return nil, fmt.Errorf("failed to create directory: %w", err) + } + continue + } + + outFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return nil, fmt.Errorf("failed to create file: %w", err) + } + defer outFile.Close() + + file, err := f.Open() + if err != nil { + return nil, fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + if _, err := io.Copy(outFile, file); err != nil { + return nil, fmt.Errorf("failed to write to file: %w", err) + } + } + + return paths, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/authorization.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/authorization.go similarity index 98% rename from vendor/github.com/mattermost/mattermost-server/utils/authorization.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/authorization.go index 5125b45..fcd88b8 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/authorization.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/authorization.go @@ -1,10 +1,10 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package utils import ( - "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/v5/model" ) func SetRolePermissionsFromConfig(roles map[string]*model.Role, cfg *model.Config, isLicensed bool) map[string]*model.Role { diff --git a/vendor/github.com/mattermost/mattermost-server/utils/backoff.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/backoff.go similarity index 84% rename from vendor/github.com/mattermost/mattermost-server/utils/backoff.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/backoff.go index be6dcc9..08a996f 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/backoff.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/backoff.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. 
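A minimal usage sketch for the new utils.UnzipToPath helper added in utils/archive.go above; the archive name and output directory are illustrative assumptions, not taken from this patch:

package main

import (
	"log"
	"os"

	"github.com/mattermost/mattermost-server/v5/utils"
)

func main() {
	f, err := os.Open("bundle.zip") // illustrative archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// UnzipToPath expects the destination directory to exist already.
	if err := os.MkdirAll("/tmp/bundle", 0700); err != nil {
		log.Fatal(err)
	}

	// *os.File satisfies io.ReaderAt; the archive size must be passed explicitly.
	paths, err := utils.UnzipToPath(f, info.Size(), "/tmp/bundle")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("extracted %d entries", len(paths))
}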
-// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/emoji.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/emoji.go similarity index 55% rename from vendor/github.com/mattermost/mattermost-server/utils/emoji.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/emoji.go index d7e8a8b..bfad8ee 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/emoji.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/emoji.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package utils @@ -11,14 +11,15 @@ import ( "image/jpeg" "image/png" "testing" + + "github.com/stretchr/testify/require" ) func CreateTestGif(t *testing.T, width int, height int) []byte { var buffer bytes.Buffer - if err := gif.Encode(&buffer, image.NewRGBA(image.Rect(0, 0, width, height)), nil); err != nil { - t.Fatalf("failed to create gif: %v", err.Error()) - } + err := gif.Encode(&buffer, image.NewRGBA(image.Rect(0, 0, width, height)), nil) + require.NoErrorf(t, err, "failed to create gif: %v", err) return buffer.Bytes() } @@ -34,9 +35,8 @@ func CreateTestAnimatedGif(t *testing.T, width int, height int, frames int) []by img.Image[i] = image.NewPaletted(image.Rect(0, 0, width, height), color.Palette{color.Black}) img.Delay[i] = 0 } - if err := gif.EncodeAll(&buffer, &img); err != nil { - t.Fatalf("failed to create animated gif: %v", err.Error()) - } + err := gif.EncodeAll(&buffer, &img) + require.NoErrorf(t, err, "failed to create animated gif: %v", err) return buffer.Bytes() } @@ -44,9 +44,8 @@ func CreateTestAnimatedGif(t *testing.T, width int, height int, frames int) []by func CreateTestJpeg(t *testing.T, width int, height int) []byte { var buffer bytes.Buffer - if err := jpeg.Encode(&buffer, image.NewRGBA(image.Rect(0, 0, width, height)), nil); err != nil { - t.Fatalf("failed to create jpeg: %v", err.Error()) - } + err := jpeg.Encode(&buffer, image.NewRGBA(image.Rect(0, 0, width, height)), nil) + require.NoErrorf(t, err, "failed to create jpeg: %v", err) return buffer.Bytes() } @@ -54,9 +53,8 @@ func CreateTestJpeg(t *testing.T, width int, height int) []byte { func CreateTestPng(t *testing.T, width int, height int) []byte { var buffer bytes.Buffer - if err := png.Encode(&buffer, image.NewRGBA(image.Rect(0, 0, width, height))); err != nil { - t.Fatalf("failed to create png: %v", err.Error()) - } + err := png.Encode(&buffer, image.NewRGBA(image.Rect(0, 0, width, height))) + require.NoErrorf(t, err, "failed to create png: %v", err) return buffer.Bytes() } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/file.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/file.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/utils/file.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/file.go index 923c559..b4de4bd 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/file.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/file.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. 
All Rights Reserved. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/fileutils/fileutils.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/fileutils/fileutils.go similarity index 65% rename from vendor/github.com/mattermost/mattermost-server/utils/fileutils/fileutils.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/fileutils/fileutils.go index 81c3741..62ad3ad 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/fileutils/fileutils.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/fileutils/fileutils.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package fileutils @@ -17,7 +17,7 @@ var ( } ) -func FindPath(path string, baseSearchPaths []string, filter func(os.FileInfo) bool) string { +func findPath(path string, baseSearchPaths []string, workingDirFirst bool, filter func(os.FileInfo) bool) string { if filepath.IsAbs(path) { if _, err := os.Stat(path); err == nil { return path @@ -27,9 +27,12 @@ func FindPath(path string, baseSearchPaths []string, filter func(os.FileInfo) bo } searchPaths := []string{} - searchPaths = append(searchPaths, baseSearchPaths...) + if workingDirFirst { + searchPaths = append(searchPaths, baseSearchPaths...) + } - // Additionally attempt to search relative to the location of the running binary. + // Attempt to search relative to the location of the running binary either before + // or after searching relative to the working directory, depending on `workingDirFirst`. var binaryDir string if exe, err := os.Executable(); err == nil { if exe, err = filepath.EvalSymlinks(exe); err == nil { @@ -47,6 +50,10 @@ func FindPath(path string, baseSearchPaths []string, filter func(os.FileInfo) bo } } + if !workingDirFirst { + searchPaths = append(searchPaths, baseSearchPaths...) + } + for _, parent := range searchPaths { found, err := filepath.Abs(filepath.Join(parent, path)) if err != nil { @@ -65,6 +72,10 @@ func FindPath(path string, baseSearchPaths []string, filter func(os.FileInfo) bo return "" } +func FindPath(path string, baseSearchPaths []string, filter func(os.FileInfo) bool) string { + return findPath(path, baseSearchPaths, true, filter) +} + // FindFile looks for the given file in nearby ancestors relative to the current working // directory as well as the directory of the executable. func FindFile(path string) string { @@ -85,3 +96,15 @@ func FindDir(dir string) (string, bool) { return found, true } + +// FindDirRelBinary looks for the given directory in nearby ancestors relative to the +// directory of the executable, then relative to the working directory, falling back to `./` if not found. 
+func FindDirRelBinary(dir string) (string, bool) { + found := findPath(dir, commonBaseSearchPaths, false, func(fileInfo os.FileInfo) bool { + return fileInfo.IsDir() + }) + if found == "" { + return "./", false + } + return found, true +} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/hash.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/hash.go similarity index 63% rename from vendor/github.com/mattermost/mattermost-server/utils/hash.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/hash.go index cc82fd9..49f63b7 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/hash.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/hash.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/html.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/html.go similarity index 75% rename from vendor/github.com/mattermost/mattermost-server/utils/html.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/html.go index a3512c5..d2aa0cd 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/html.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/html.go @@ -1,12 +1,11 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package utils import ( "bytes" "errors" - "fmt" "html/template" "io" "path/filepath" @@ -16,8 +15,9 @@ import ( "github.com/fsnotify/fsnotify" "github.com/mattermost/go-i18n/i18n" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/utils/fileutils" + + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/utils/fileutils" ) type HTMLTemplateWatcher struct { @@ -28,7 +28,7 @@ type HTMLTemplateWatcher struct { func NewHTMLTemplateWatcher(directory string) (*HTMLTemplateWatcher, error) { templatesDir, _ := fileutils.FindDir(directory) - mlog.Debug(fmt.Sprintf("Parsing server templates at %v", templatesDir)) + mlog.Debug("Parsing server templates", mlog.String("templates_directory", templatesDir)) ret := &HTMLTemplateWatcher{ stop: make(chan struct{}), @@ -44,11 +44,11 @@ func NewHTMLTemplateWatcher(directory string) (*HTMLTemplateWatcher, error) { return nil, err } - if htmlTemplates, err := template.ParseGlob(filepath.Join(templatesDir, "*.html")); err != nil { + htmlTemplates, err := template.ParseGlob(filepath.Join(templatesDir, "*.html")) + if err != nil { return nil, err - } else { - ret.templates.Store(htmlTemplates) } + ret.templates.Store(htmlTemplates) go func() { defer close(ret.stopped) @@ -60,15 +60,15 @@ func NewHTMLTemplateWatcher(directory string) (*HTMLTemplateWatcher, error) { return case event := <-watcher.Events: if event.Op&fsnotify.Write == fsnotify.Write { - mlog.Info(fmt.Sprintf("Re-parsing templates because of modified file %v", event.Name)) + mlog.Info("Re-parsing templates because of modified file", mlog.String("file_name", event.Name)) if htmlTemplates, err := template.ParseGlob(filepath.Join(templatesDir, "*.html")); err != nil { - mlog.Error(fmt.Sprintf("Failed to parse templates %v", err)) + mlog.Error("Failed to parse 
templates.", mlog.Err(err)) } else { ret.templates.Store(htmlTemplates) } } case err := <-watcher.Errors: - mlog.Error(fmt.Sprintf("Failed in directory watcher %s", err)) + mlog.Error("Failed in directory watcher", mlog.Err(err)) } } }() @@ -113,7 +113,7 @@ func (t *HTMLTemplate) RenderToWriter(w io.Writer) error { } if err := t.Templates.ExecuteTemplate(w, t.TemplateName, t); err != nil { - mlog.Error(fmt.Sprintf("Error rendering template %v err=%v", t.TemplateName, err)) + mlog.Warn("Error rendering template", mlog.String("template_name", t.TemplateName), mlog.Err(err)) return err } @@ -140,7 +140,11 @@ func escapeForHtml(arg interface{}) interface{} { } return safeArg default: - mlog.Warn(fmt.Sprintf("Unable to escape value for HTML template %v of type %v", arg, reflect.ValueOf(arg).Type())) + mlog.Warn( + "Unable to escape value for HTML template", + mlog.Any("html_template", arg), + mlog.String("template_type", reflect.ValueOf(arg).Type().String()), + ) return "" } } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/i18n.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/i18n.go similarity index 80% rename from vendor/github.com/mattermost/mattermost-server/utils/i18n.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/i18n.go index d042211..d8e7104 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/i18n.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/i18n.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils @@ -7,13 +7,15 @@ import ( "fmt" "io/ioutil" "net/http" + "os" "path/filepath" "strings" "github.com/mattermost/go-i18n/i18n" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils/fileutils" + + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils/fileutils" ) var T i18n.TranslateFunc @@ -32,7 +34,13 @@ func TranslationsPreInit() error { // segfault trying to handle the error, and the untranslated IDs are strictly better. 
T = TfuncWithFallback("en") TDefault = TfuncWithFallback("en") - return InitTranslationsWithDir("i18n") + + translationsDir := "i18n" + if mattermostPath := os.Getenv("MM_SERVER_PATH"); mattermostPath != "" { + translationsDir = filepath.Join(mattermostPath, "i18n") + } + + return InitTranslationsWithDir(translationsDir) } func InitTranslations(localizationSettings model.LocalizationSettings) error { @@ -44,9 +52,9 @@ func InitTranslations(localizationSettings model.LocalizationSettings) error { } func InitTranslationsWithDir(dir string) error { - i18nDirectory, found := fileutils.FindDir(dir) + i18nDirectory, found := fileutils.FindDirRelBinary(dir) if !found { - return fmt.Errorf("Unable to find i18n directory") + return fmt.Errorf(fmt.Sprintf("Unable to find i18n directory at %q", dir)) } files, _ := ioutil.ReadDir(i18nDirectory) @@ -67,7 +75,7 @@ func InitTranslationsWithDir(dir string) error { func GetTranslationsBySystemLocale() (i18n.TranslateFunc, error) { locale := *settings.DefaultServerLocale if _, ok := locales[locale]; !ok { - mlog.Error(fmt.Sprintf("Failed to load system translations for '%v' attempting to fall back to '%v'", locale, model.DEFAULT_LOCALE)) + mlog.Warn("Failed to load system translations for", mlog.String("locale", locale), mlog.String("attempting to fall back to default locale", model.DEFAULT_LOCALE)) locale = model.DEFAULT_LOCALE } @@ -80,7 +88,7 @@ func GetTranslationsBySystemLocale() (i18n.TranslateFunc, error) { return nil, fmt.Errorf("Failed to load system translations") } - mlog.Info(fmt.Sprintf("Loaded system translations for '%v' from '%v'", locale, locales[locale])) + mlog.Info("Loaded system translations", mlog.String("for locale", locale), mlog.String("from locale", locales[locale])) return translations, nil } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/jsonutils/json.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/jsonutils/json.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/utils/jsonutils/json.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/jsonutils/json.go index da77a2b..4651ec8 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/jsonutils/json.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/jsonutils/json.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package jsonutils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/license.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/license.go similarity index 69% rename from vendor/github.com/mattermost/mattermost-server/utils/license.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/license.go index 338ad17..b7995a4 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/license.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/license.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
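The i18n change above means TranslationsPreInit now honours MM_SERVER_PATH, which is what lets code running outside the server's working directory (plugins, tests) locate the i18n catalogue. A sketch, with the install path purely illustrative:

package main

import (
	"log"
	"os"

	"github.com/mattermost/mattermost-server/v5/utils"
)

func main() {
	// Point the lookup at a server install; "/opt/mattermost" is an assumption for illustration.
	os.Setenv("MM_SERVER_PATH", "/opt/mattermost")
	if err := utils.TranslationsPreInit(); err != nil {
		log.Fatalf("failed to load translations: %v", err)
	}
	log.Println(utils.T("api.oauth.auth_complete"))
}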
package utils @@ -10,16 +10,15 @@ import ( "crypto/x509" "encoding/base64" "encoding/pem" - "fmt" "io/ioutil" "os" "path/filepath" "strconv" "strings" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils/fileutils" + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils/fileutils" ) var publicKey []byte = []byte(`-----BEGIN PUBLIC KEY----- @@ -37,7 +36,7 @@ func ValidateLicense(signed []byte) (bool, string) { _, err := base64.StdEncoding.Decode(decoded, signed) if err != nil { - mlog.Error(fmt.Sprintf("Encountered error decoding license, err=%v", err.Error())) + mlog.Error("Encountered error decoding license", mlog.Err(err)) return false, "" } @@ -58,7 +57,7 @@ func ValidateLicense(signed []byte) (bool, string) { public, err := x509.ParsePKIXPublicKey(block.Bytes) if err != nil { - mlog.Error(fmt.Sprintf("Encountered error signing license, err=%v", err.Error())) + mlog.Error("Encountered error signing license", mlog.Err(err)) return false, "" } @@ -70,7 +69,7 @@ func ValidateLicense(signed []byte) (bool, string) { err = rsa.VerifyPKCS1v15(rsaPublic, crypto.SHA512, d, signature) if err != nil { - mlog.Error(fmt.Sprintf("Invalid signature, err=%v", err.Error())) + mlog.Error("Invalid signature", mlog.Err(err)) return false, "" } @@ -81,32 +80,32 @@ func GetAndValidateLicenseFileFromDisk(location string) (*model.License, []byte) fileName := GetLicenseFileLocation(location) if _, err := os.Stat(fileName); err != nil { - mlog.Debug(fmt.Sprintf("We could not find the license key in the database or on disk at %v", fileName)) + mlog.Debug("We could not find the license key in the database or on disk at", mlog.String("filename", fileName)) return nil, nil } - mlog.Info(fmt.Sprintf("License key has not been uploaded. Loading license key from disk at %v", fileName)) + mlog.Info("License key has not been uploaded. 
Loading license key from disk at", mlog.String("filename", fileName)) licenseBytes := GetLicenseFileFromDisk(fileName) - if success, licenseStr := ValidateLicense(licenseBytes); !success { - mlog.Error(fmt.Sprintf("Found license key at %v but it appears to be invalid.", fileName)) + success, licenseStr := ValidateLicense(licenseBytes) + if !success { + mlog.Error("Found license key at %v but it appears to be invalid.", mlog.String("filename", fileName)) return nil, nil - } else { - return model.LicenseFromJson(strings.NewReader(licenseStr)), licenseBytes } + return model.LicenseFromJson(strings.NewReader(licenseStr)), licenseBytes } func GetLicenseFileFromDisk(fileName string) []byte { file, err := os.Open(fileName) if err != nil { - mlog.Error(fmt.Sprintf("Failed to open license key from disk at %v err=%v", fileName, err.Error())) + mlog.Error("Failed to open license key from disk at", mlog.String("filename", fileName), mlog.Err(err)) return nil } defer file.Close() licenseBytes, err := ioutil.ReadAll(file) if err != nil { - mlog.Error(fmt.Sprintf("Failed to read license key from disk at %v err=%v", fileName, err.Error())) + mlog.Error("Failed to read license key from disk at", mlog.String("filename", fileName), mlog.Err(err)) return nil } @@ -117,9 +116,8 @@ func GetLicenseFileLocation(fileLocation string) string { if fileLocation == "" { configDir, _ := fileutils.FindDir("config") return filepath.Join(configDir, "mattermost.mattermost-license") - } else { - return fileLocation } + return fileLocation } func GetClientLicense(l *model.License) map[string]string { @@ -140,22 +138,29 @@ func GetClientLicense(l *model.License) map[string]string { props["Metrics"] = strconv.FormatBool(*l.Features.Metrics) props["GoogleOAuth"] = strconv.FormatBool(*l.Features.GoogleOAuth) props["Office365OAuth"] = strconv.FormatBool(*l.Features.Office365OAuth) + props["OpenId"] = strconv.FormatBool(*l.Features.OpenId) props["Compliance"] = strconv.FormatBool(*l.Features.Compliance) props["MHPNS"] = strconv.FormatBool(*l.Features.MHPNS) props["Announcement"] = strconv.FormatBool(*l.Features.Announcement) props["Elasticsearch"] = strconv.FormatBool(*l.Features.Elasticsearch) props["DataRetention"] = strconv.FormatBool(*l.Features.DataRetention) + props["IDLoadedPushNotifications"] = strconv.FormatBool(*l.Features.IDLoadedPushNotifications) props["IssuedAt"] = strconv.FormatInt(l.IssuedAt, 10) props["StartsAt"] = strconv.FormatInt(l.StartsAt, 10) props["ExpiresAt"] = strconv.FormatInt(l.ExpiresAt, 10) props["Name"] = l.Customer.Name props["Email"] = l.Customer.Email props["Company"] = l.Customer.Company - props["PhoneNumber"] = l.Customer.PhoneNumber props["EmailNotificationContents"] = strconv.FormatBool(*l.Features.EmailNotificationContents) props["MessageExport"] = strconv.FormatBool(*l.Features.MessageExport) props["CustomPermissionsSchemes"] = strconv.FormatBool(*l.Features.CustomPermissionsSchemes) + props["GuestAccounts"] = strconv.FormatBool(*l.Features.GuestAccounts) + props["GuestAccountsPermissions"] = strconv.FormatBool(*l.Features.GuestAccountsPermissions) props["CustomTermsOfService"] = strconv.FormatBool(*l.Features.CustomTermsOfService) + props["LockTeammateNameDisplay"] = strconv.FormatBool(*l.Features.LockTeammateNameDisplay) + props["Cloud"] = strconv.FormatBool(*l.Features.Cloud) + props["SharedChannels"] = strconv.FormatBool(*l.Features.SharedChannels) + props["RemoteClusterService"] = strconv.FormatBool(*l.Features.RemoteClusterService) } return props diff --git 
a/vendor/github.com/mattermost/mattermost-server/utils/logger.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/logger.go similarity index 57% rename from vendor/github.com/mattermost/mattermost-server/utils/logger.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/logger.go index 49b560b..49a6393 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/logger.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/logger.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils @@ -7,15 +7,15 @@ import ( "path/filepath" "strings" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils/fileutils" + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils/fileutils" ) const ( - LOG_ROTATE_SIZE = 10000 - LOG_FILENAME = "mattermost.log" - LOG_NOTIFICATION_FILENAME = "notifications.log" + LogRotateSize = 10000 + LogFilename = "mattermost.log" + LogNotificationFilename = "notifications.log" ) type fileLocationFunc func(string) string @@ -37,7 +37,7 @@ func GetLogFileLocation(fileLocation string) string { fileLocation, _ = fileutils.FindDir("logs") } - return filepath.Join(fileLocation, LOG_FILENAME) + return filepath.Join(fileLocation, LogFilename) } func GetNotificationsLogFileLocation(fileLocation string) string { @@ -45,18 +45,19 @@ func GetNotificationsLogFileLocation(fileLocation string) string { fileLocation, _ = fileutils.FindDir("logs") } - return filepath.Join(fileLocation, LOG_NOTIFICATION_FILENAME) + return filepath.Join(fileLocation, LogNotificationFilename) } func GetLogSettingsFromNotificationsLogSettings(notificationLogSettings *model.NotificationLogSettings) *model.LogSettings { return &model.LogSettings{ - ConsoleJson: notificationLogSettings.ConsoleJson, - ConsoleLevel: notificationLogSettings.ConsoleLevel, - EnableConsole: notificationLogSettings.EnableConsole, - EnableFile: notificationLogSettings.EnableFile, - FileJson: notificationLogSettings.FileJson, - FileLevel: notificationLogSettings.FileLevel, - FileLocation: notificationLogSettings.FileLocation, + ConsoleJson: notificationLogSettings.ConsoleJson, + ConsoleLevel: notificationLogSettings.ConsoleLevel, + EnableConsole: notificationLogSettings.EnableConsole, + EnableFile: notificationLogSettings.EnableFile, + FileJson: notificationLogSettings.FileJson, + FileLevel: notificationLogSettings.FileLevel, + FileLocation: notificationLogSettings.FileLocation, + AdvancedLoggingConfig: notificationLogSettings.AdvancedLoggingConfig, } } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/autolink.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/autolink.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/autolink.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/autolink.go index d362151..d06ada6 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/autolink.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/autolink.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. 
All Rights Reserved. +// See LICENSE.txt for license information. package markdown @@ -14,6 +14,7 @@ import ( var ( DefaultUrlSchemes = []string{"http", "https", "ftp", "mailto", "tel"} + wwwAutoLinkRegex = regexp.MustCompile(`^www\d{0,3}\.`) ) // Given a string with a w at the given position, tries to parse and return a range containing a www link. @@ -30,7 +31,7 @@ func parseWWWAutolink(data string, position int) (Range, bool) { } // Check that this starts with www - if len(data)-position < 4 || !regexp.MustCompile(`^www\d{0,3}\.`).MatchString(data[position:]) { + if len(data)-position < 4 || !wwwAutoLinkRegex.MatchString(data[position:]) { return Range{}, false } @@ -59,9 +60,8 @@ func isAllowedBeforeWWWLink(c byte) bool { switch c { case '*', '_', '~', ')': return true - default: - return false } + return false } // Given a string with a : at the given position, tried to parse and return a range containing a URL scheme @@ -153,9 +153,8 @@ func checkDomain(data string, allowShort bool) int { // this is called from parseWWWAutolink if foundPeriod { return i - } else { - return 0 } + return 0 } // Returns true if the provided link starts with a valid character for a domain name. Equivalent to @@ -251,7 +250,6 @@ func canEndAutolink(c rune) bool { switch c { case '?', '!', '.', ',', ':', '*', '_', '~', '\'', '"': return false - default: - return true } + return true } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/block_quote.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/block_quote.go similarity index 92% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/block_quote.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/block_quote.go index 04a3246..6ae2ff4 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/block_quote.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/block_quote.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/blocks.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/blocks.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/blocks.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/blocks.go index 921cdeb..607356e 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/blocks.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/blocks.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown @@ -37,13 +37,14 @@ type Range struct { End int } -func closeBlocks(blocks []Block, referenceDefinitions *[]*ReferenceDefinition) { +func closeBlocks(blocks []Block, referenceDefinitions []*ReferenceDefinition) []*ReferenceDefinition { for _, block := range blocks { block.Close() if p, ok := block.(*Paragraph); ok && len(p.ReferenceDefinitions) > 0 { - *referenceDefinitions = append(*referenceDefinitions, p.ReferenceDefinitions...) + referenceDefinitions = append(referenceDefinitions, p.ReferenceDefinitions...) 
} } + return referenceDefinitions } func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefinition) { @@ -78,7 +79,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti for i := lastMatchIndex; i >= 0; i-- { if container, ok := openBlocks[i].(ContainerBlock); ok { if addedBlocks := container.AddChild(newBlocks); addedBlocks != nil { - closeBlocks(openBlocks[i+1:], &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions) openBlocks = openBlocks[:i+1] openBlocks = append(openBlocks, addedBlocks...) didAdd = true @@ -98,7 +99,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti continue } - closeBlocks(openBlocks[lastMatchIndex+1:], &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks[lastMatchIndex+1:], referenceDefinitions) openBlocks = openBlocks[:lastMatchIndex+1] if openBlocks[lastMatchIndex].AddLine(indentation, r) { @@ -109,7 +110,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti for i := lastMatchIndex; i >= 0; i-- { if container, ok := openBlocks[i].(ContainerBlock); ok { if newBlocks := container.AddChild([]Block{paragraph}); newBlocks != nil { - closeBlocks(openBlocks[i+1:], &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks[i+1:], referenceDefinitions) openBlocks = openBlocks[:i+1] openBlocks = append(openBlocks, newBlocks...) break @@ -119,7 +120,7 @@ func ParseBlocks(markdown string, lines []Line) (*Document, []*ReferenceDefiniti } } - closeBlocks(openBlocks, &referenceDefinitions) + referenceDefinitions = closeBlocks(openBlocks, referenceDefinitions) return document, referenceDefinitions } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/document.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/document.go similarity index 76% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/document.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/document.go index 224b5d2..306b93d 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/document.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/document.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/fenced_code.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/fenced_code.go similarity index 95% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/fenced_code.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/fenced_code.go index 8b2ebd4..4fd97fd 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/fenced_code.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/fenced_code.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
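The closeBlocks change above replaces a pointer-to-slice out-parameter with the usual append contract, where the callee returns the grown slice and the caller reassigns it. A generic sketch of the two shapes, with illustrative names:

package example

// Old shape: grow the slice through a pointer out-parameter.
func collectViaPointer(dst *[]int, src []int) {
	*dst = append(*dst, src...)
}

// New shape: return the grown slice; the caller writes dst = collect(dst, src),
// mirroring how append itself must be used because it may reallocate.
func collect(dst []int, src []int) []int {
	return append(dst, src...)
}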
package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/html.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/html.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html.go index afb72bf..5258307 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/html.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/html_entities.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html_entities.go similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/html_entities.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html_entities.go index 8fe2481..e94cebb 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/html_entities.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/html_entities.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/indented_code.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/indented_code.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/indented_code.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/indented_code.go index dc5dce1..a89ee6c 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/indented_code.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/indented_code.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/inlines.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inlines.go similarity index 98% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/inlines.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inlines.go index 8ec0fcc..43dee3b 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/inlines.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inlines.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package markdown @@ -406,10 +406,9 @@ func (p *inlineParser) lookForLinkOrImage() { } p.delimiterStack.Remove(element) return - } else { - p.delimiterStack.Remove(element) - break } + p.delimiterStack.Remove(element) + break } absPos := relativeToAbsolutePosition(p.ranges, p.position) p.inlines = append(p.inlines, &Text{ @@ -595,7 +594,7 @@ func ParseInlines(markdown string, ranges []Range, referenceDefinitions []*Refer } func MergeInlineText(inlines []Inline) []Inline { - var ret []Inline + ret := inlines[:0] for i, v := range inlines { // always add first node if i == 0 { diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/inspect.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inspect.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/inspect.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inspect.go index 80b5bc2..3c7f2d1 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/inspect.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/inspect.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/lines.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/lines.go similarity index 69% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/lines.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/lines.go index a38b516..f59e5af 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/lines.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/lines.go @@ -1,15 +1,20 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown +import ( + "strings" +) + type Line struct { Range } -func ParseLines(markdown string) (lines []Line) { +func ParseLines(markdown string) []Line { lineStartPosition := 0 isAfterCarriageReturn := false + lines := make([]Line, 0, strings.Count(markdown, "\n")) for position, r := range markdown { if r == '\n' { lines = append(lines, Line{Range{lineStartPosition, position + 1}}) @@ -23,5 +28,5 @@ func ParseLines(markdown string) (lines []Line) { if lineStartPosition < len(markdown) { lines = append(lines, Line{Range{lineStartPosition, len(markdown)}}) } - return + return lines } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/links.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/links.go similarity index 91% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/links.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/links.go index 9f3128c..6aa56f2 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/links.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/links.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package markdown @@ -147,7 +147,7 @@ func parseImageDimensions(markdown string, position int) (raw Range, next int, o // Read width hasWidth := false - for isNumericByte(markdown[position]) { + for position < len(markdown)-1 && isNumericByte(markdown[position]) { hasWidth = true position += 1 } @@ -158,14 +158,14 @@ func parseImageDimensions(markdown string, position int) (raw Range, next int, o } // Read the x - if markdown[position] != 'x' && markdown[position] != 'X' { + if (markdown[position] != 'x' && markdown[position] != 'X') || position == len(markdown)-1 { return } position += 1 // Read height hasHeight := false - for isNumericByte(markdown[position]) { + for position < len(markdown)-1 && isNumericByte(markdown[position]) { hasHeight = true position += 1 } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/list.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/list.go similarity index 98% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/list.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/list.go index aea7115..3903929 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/list.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/list.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/markdown.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/markdown.go similarity index 96% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/markdown.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/markdown.go index 57b10f3..5ccdad8 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/markdown.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/markdown.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. // This package implements a parser for the subset of the CommonMark spec necessary for us to do // server-side processing. It is not a full implementation and lacks many features. But it is @@ -23,9 +23,8 @@ func isWhitespace(c rune) bool { switch c { case ' ', '\t', '\n', '\u000b', '\u000c', '\r': return true - default: - return false } + return false } func isWhitespaceByte(c byte) bool { diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/paragraph.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/paragraph.go similarity index 93% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/paragraph.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/paragraph.go index 6a40fdf..aef01b5 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/paragraph.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/paragraph.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
+// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/markdown/reference_definition.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/reference_definition.go similarity index 94% rename from vendor/github.com/mattermost/mattermost-server/utils/markdown/reference_definition.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/reference_definition.go index e2d0be3..69e8ed9 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/markdown/reference_definition.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/markdown/reference_definition.go @@ -1,5 +1,5 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package markdown diff --git a/vendor/github.com/mattermost/mattermost-server/utils/merge.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/merge.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/utils/merge.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/merge.go index 968b499..84287b0 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/merge.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/merge.go @@ -1,5 +1,5 @@ -// Copyright (c) 2019-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/password.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/password.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/utils/password.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/password.go index 087b99a..9e05a8a 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/password.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/password.go @@ -1,5 +1,5 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
package utils @@ -7,7 +7,7 @@ import ( "net/http" "strings" - "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/v5/model" ) func IsPasswordValidWithSettings(password string, settings *model.PasswordSettings) *model.AppError { diff --git a/vendor/github.com/mattermost/mattermost-server/utils/policies-roles-mapping.json b/vendor/github.com/mattermost/mattermost-server/v5/utils/policies-roles-mapping.json similarity index 99% rename from vendor/github.com/mattermost/mattermost-server/utils/policies-roles-mapping.json rename to vendor/github.com/mattermost/mattermost-server/v5/utils/policies-roles-mapping.json index 414ab62..2492915 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/policies-roles-mapping.json +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/policies-roles-mapping.json @@ -420,7 +420,7 @@ { "roleName": "system_admin", "permission": "edit_post", - "shouldHave": false + "shouldHave": true } ] }, diff --git a/vendor/github.com/mattermost/mattermost-server/utils/random.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/random.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/utils/random.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/random.go index 8f3008f..eaffcdc 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/random.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/random.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/subpath.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/subpath.go similarity index 81% rename from vendor/github.com/mattermost/mattermost-server/utils/subpath.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/subpath.go index 9bb75db..b6f61ee 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/subpath.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/subpath.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils @@ -17,9 +17,9 @@ import ( "github.com/pkg/errors" - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils/fileutils" + "github.com/mattermost/mattermost-server/v5/mlog" + "github.com/mattermost/mattermost-server/v5/model" + "github.com/mattermost/mattermost-server/v5/utils/fileutils" ) // getSubpathScript renders the inline script that defines window.publicPath to change how webpack loads assets. @@ -45,14 +45,14 @@ func GetSubpathScriptHash(subpath string) string { return fmt.Sprintf(" 'sha256-%s'", base64.StdEncoding.EncodeToString(scriptHash[:])) } -// UpdateAssetsSubpath rewrites assets in the /client directory to assume the application is hosted -// at the given subpath instead of at the root. No changes are written unless necessary. -func UpdateAssetsSubpath(subpath string) error { +// UpdateAssetsSubpathInDir rewrites assets in the given directory to assume the application is +// hosted at the given subpath instead of at the root. No changes are written unless necessary. 
+func UpdateAssetsSubpathInDir(subpath, directory string) error { if subpath == "" { subpath = "/" } - staticDir, found := fileutils.FindDir(model.CLIENT_DIR) + staticDir, found := fileutils.FindDir(directory) if !found { return errors.New("failed to find client dir") } @@ -85,13 +85,13 @@ func UpdateAssetsSubpath(subpath string) error { newRootHtml := string(oldRootHtml) - reCSP := regexp.MustCompile(``) + reCSP := regexp.MustCompile(``) if results := reCSP.FindAllString(newRootHtml, -1); len(results) == 0 { return fmt.Errorf("failed to find 'Content-Security-Policy' meta tag to rewrite") } newRootHtml = reCSP.ReplaceAllLiteralString(newRootHtml, fmt.Sprintf( - ``, + ``, GetSubpathScriptHash(subpath), )) @@ -120,13 +120,13 @@ func UpdateAssetsSubpath(subpath string) error { // Rewrite the manifest.json and *.css references to `/static/*` (or a previously rewritten subpath). err = filepath.Walk(staticDir, func(walkPath string, info os.FileInfo, err error) error { if filepath.Base(walkPath) == "manifest.json" || filepath.Ext(walkPath) == ".css" { - if old, err := ioutil.ReadFile(walkPath); err != nil { + old, err := ioutil.ReadFile(walkPath) + if err != nil { return errors.Wrapf(err, "failed to open %s", walkPath) - } else { - new := strings.Replace(string(old), pathToReplace, newPath, -1) - if err = ioutil.WriteFile(walkPath, []byte(new), 0); err != nil { - return errors.Wrapf(err, "failed to update %s with subpath %s", walkPath, subpath) - } + } + new := strings.Replace(string(old), pathToReplace, newPath, -1) + if err = ioutil.WriteFile(walkPath, []byte(new), 0); err != nil { + return errors.Wrapf(err, "failed to update %s with subpath %s", walkPath, subpath) } } @@ -139,6 +139,12 @@ func UpdateAssetsSubpath(subpath string) error { return nil } +// UpdateAssetsSubpath rewrites assets in the /client directory to assume the application is hosted +// at the given subpath instead of at the root. No changes are written unless necessary. +func UpdateAssetsSubpath(subpath string) error { + return UpdateAssetsSubpathInDir(subpath, model.CLIENT_DIR) +} + // UpdateAssetsSubpathFromConfig uses UpdateAssetsSubpath and any path defined in the SiteURL. func UpdateAssetsSubpathFromConfig(config *model.Config) error { // Don't rewrite in development environments, since webpack in developer mode constantly @@ -148,6 +154,12 @@ func UpdateAssetsSubpathFromConfig(config *model.Config) error { return nil } + // Similarly, don't rewrite during a CI build, when the assets may not even be present. + if os.Getenv("IS_CI") == "true" { + mlog.Debug("Skipping update to assets subpath since CI build") + return nil + } + subpath, err := GetSubpathFromConfig(config) if err != nil { return err diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/test_files_compiler.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/test_files_compiler.go new file mode 100644 index 0000000..ff6bc14 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/test_files_compiler.go @@ -0,0 +1,45 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package utils + +import ( + "bytes" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" +) + +func CompileGo(t *testing.T, sourceCode, outputPath string) { + dir, err := ioutil.TempDir(".", "") + require.NoError(t, err) + defer os.RemoveAll(dir) + + dir, err = filepath.Abs(dir) + require.NoError(t, err) + + // Write out main.go given the source code. + main := filepath.Join(dir, "main.go") + err = ioutil.WriteFile(main, []byte(sourceCode), 0600) + require.NoError(t, err) + + _, sourceFile, _, ok := runtime.Caller(0) + require.True(t, ok) + serverPath := filepath.Dir(filepath.Dir(sourceFile)) + + out := &bytes.Buffer{} + cmd := exec.Command("go", "build", "-o", outputPath, main) + cmd.Dir = serverPath + cmd.Stdout = out + cmd.Stderr = out + err = cmd.Run() + if err != nil { + t.Log("Go compile errors:\n", out.String()) + } + require.NoError(t, err, "failed to compile go") +} diff --git a/vendor/github.com/mattermost/mattermost-server/utils/textgeneration.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/textgeneration.go similarity index 97% rename from vendor/github.com/mattermost/mattermost-server/utils/textgeneration.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/textgeneration.go index cede614..db9e3af 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/textgeneration.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/textgeneration.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils @@ -14,7 +14,7 @@ const ( ) // Strings that should pass as acceptable posts -var FUZZY_STRINGS_POSTS = []string{ +var FuzzyStringsPosts = []string{ `**[1] - [Markdown Tests]** _italics_ more _italics_ @@ -367,7 +367,7 @@ This is a link to http://example.com. } // Strings that should pass as acceptable team names -var FUZZY_STRINGS_NAMES = []string{ +var FuzzyStringsNames = []string{ "*", "?", ".", @@ -426,7 +426,7 @@ var FUZZY_STRINGS_NAMES = []string{ } // Strings that should pass as acceptable emails -var FUZZY_STRINGS_EMAILS = []string{ +var FuzzyStringsEmails = []string{ "sue@thatmightbe", "sue@thatmightbe.c", "sue@thatmightbe.co", @@ -439,7 +439,7 @@ var FUZZY_STRINGS_EMAILS = []string{ } // Lovely giberish for all to use -const GIBBERISH_TEXT = ` +const GibberishText = ` Thus one besides much goodness shyly far some hyena overtook since rhinoceros nodded withdrew wombat before deserved apart a alongside the far dalmatian less ouch where yet a salmon. Then jeez far marginal hey aboard more as leaned much oversold that inside spoke showed much went crud close save so and and after and informally much lion commendably less conductive oh excepting conductive compassionate jeepers hey a much leopard alas woolly untruthful outside snug rashly one cunning past fabulous adjusted far woodchuck and and indecisive crud loving exotic less resolute ladybug sprang drank under following far the as hence passably stolidly jeez the inset spaciously more cozily fishily the hey alas petted one audible yikes dear preparatory darn goldfinch gosh a then as moth more guinea. 
Timid mislaid as salamander yikes alas ouch much that goldfinch shark in before instead dear one swore vivid versus one until regardless sang panther tolerable much preparatory hardily shuddered where coquettish far sheep coarsely exaggerated preparatory because cordial awesome gradually nutria that dear mocking behind off staunchly regarding a the komodo crud shrewd well jeez iguanodon strove strived and moodily and sought and and mounted gosh aboard crud spitefully boa. @@ -463,7 +463,7 @@ The painful essential jeepers merrily proudly essential and less far dismounted Beside far this this a crud polite cantankerous exclusively misheard pled far circuitously and frugal less more temperately gauche goldfinch oh against this along excitedly goodhearted more classically quit serenely outside vulture ouch after one a this yet. Less and handsomely manatee some amidst much reined komodo busted exultingly but fatuously less across mighty goodness objective alas glaringly gregariously hello the since one pridefully much well placed far less goodness jellyfish unnecessary reciprocating a far stylistic gazed one. Hey rethought excepting lamely much and naughtily amidst more since jeez then bluebird hence less bald by some brought left the across logic loyal brightly jeez capitally that less more forward rebound a yikes chose convulsively confidently repeated broadcast much dipped when awesomely or some some regal the scowled merry zebra since more credible so inescapably fetchingly and lantern that due dear one went gosh wow well furrowed much much specially spoiled as vitally instead the seriously some rooster irrespective well imprecisely rapidly more llama. -Up to and hey without pill that this squid alas brusque on inventoried and spread the more excepting aristocratically due piquant wove beneath that macaw in more until much grimaced far and jeez enticingly unicorn some far crab more barring purely jeepers clear groomed glaring hey dear hence before the the this hello.` +Up to and hey without pill that this squid alas brusque on inventoried and spread the more excepting aristocratically due piquant wove beneath that macaw in more until much grimaced far and jeez enticingly unicorn some far crab more barring purely jeepers clear groomed glaring hey dear hence before the this hello.` func RandString(l int, charset string) string { ret := make([]byte, l) @@ -481,7 +481,7 @@ func RandString(l int, charset string) string { // } // func FuzzEmail() string { -// return FUZZY_STRINGS_EMAILS[RandIntFromRange(Range{0, len(FUZZY_STRINGS_EMAILS) - 1})] +// return FuzzyStringsEmails[RandIntFromRange(Range{0, len(FuzzyStringsEmails) - 1})] // } func RandomName(length Range, charset string) string { @@ -490,7 +490,7 @@ func RandomName(length Range, charset string) string { } func FuzzName() string { - return FUZZY_STRINGS_NAMES[RandIntFromRange(Range{0, len(FUZZY_STRINGS_NAMES) - 1})] + return FuzzyStringsNames[RandIntFromRange(Range{0, len(FuzzyStringsNames) - 1})] } // Random selection of text for post @@ -498,12 +498,12 @@ func RandomText(length Range, hashtags Range, mentions Range, users []string) st textLength := RandIntFromRange(length) numHashtags := RandIntFromRange(hashtags) numMentions := RandIntFromRange(mentions) - if textLength > len(GIBBERISH_TEXT) || textLength < 0 { - textLength = len(GIBBERISH_TEXT) + if textLength > len(GibberishText) || textLength < 0 { + textLength = len(GibberishText) } - startPosition := RandIntFromRange(Range{0, len(GIBBERISH_TEXT) - textLength - 1}) + startPosition 
:= RandIntFromRange(Range{0, len(GibberishText) - textLength - 1}) - words := strings.Split(GIBBERISH_TEXT[startPosition:startPosition+textLength], " ") + words := strings.Split(GibberishText[startPosition:startPosition+textLength], " ") for i := 0; i < numHashtags; i++ { randword := RandIntFromRange(Range{0, len(words) - 1}) words = append(words, " #"+words[randword]) @@ -525,5 +525,5 @@ func RandomText(length Range, hashtags Range, mentions Range, users []string) st } func FuzzPost() string { - return FUZZY_STRINGS_POSTS[RandIntFromRange(Range{0, len(FUZZY_STRINGS_POSTS) - 1})] + return FuzzyStringsPosts[RandIntFromRange(Range{0, len(FuzzyStringsPosts) - 1})] } diff --git a/vendor/github.com/mattermost/mattermost-server/utils/time.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/time.go similarity index 93% rename from vendor/github.com/mattermost/mattermost-server/utils/time.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/time.go index 5227887..7843fe4 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/time.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/time.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/utils/urlencode.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/urlencode.go similarity index 86% rename from vendor/github.com/mattermost/mattermost-server/utils/urlencode.go rename to vendor/github.com/mattermost/mattermost-server/v5/utils/urlencode.go index 0110ab6..dae3f7e 100644 --- a/vendor/github.com/mattermost/mattermost-server/utils/urlencode.go +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/urlencode.go @@ -1,5 +1,5 @@ // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. +// See LICENSE.txt for license information. package utils diff --git a/vendor/github.com/mattermost/mattermost-server/v5/utils/utils.go b/vendor/github.com/mattermost/mattermost-server/v5/utils/utils.go new file mode 100644 index 0000000..5135dbf --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/v5/utils/utils.go @@ -0,0 +1,217 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package utils + +import ( + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + + "github.com/pkg/errors" + + "github.com/mattermost/mattermost-server/v5/model" +) + +func StringInSlice(a string, slice []string) bool { + for _, b := range slice { + if b == a { + return true + } + } + return false +} + +// RemoveStringFromSlice removes the first occurrence of a from slice. +func RemoveStringFromSlice(a string, slice []string) []string { + for i, str := range slice { + if str == a { + return append(slice[:i], slice[i+1:]...) + } + } + return slice +} + +// RemoveStringsFromSlice removes all occurrences of strings from slice. 
+func RemoveStringsFromSlice(slice []string, strings ...string) []string { + newSlice := []string{} + + for _, item := range slice { + if !StringInSlice(item, strings) { + newSlice = append(newSlice, item) + } + } + + return newSlice +} + +func StringArrayIntersection(arr1, arr2 []string) []string { + arrMap := map[string]bool{} + result := []string{} + + for _, value := range arr1 { + arrMap[value] = true + } + + for _, value := range arr2 { + if arrMap[value] { + result = append(result, value) + } + } + + return result +} + +func RemoveDuplicatesFromStringArray(arr []string) []string { + result := make([]string, 0, len(arr)) + seen := make(map[string]bool) + + for _, item := range arr { + if !seen[item] { + result = append(result, item) + seen[item] = true + } + } + + return result +} + +func StringSliceDiff(a, b []string) []string { + m := make(map[string]bool) + result := []string{} + + for _, item := range b { + m[item] = true + } + + for _, item := range a { + if !m[item] { + result = append(result, item) + } + } + return result +} + +func GetIpAddress(r *http.Request, trustedProxyIPHeader []string) string { + address := "" + + for _, proxyHeader := range trustedProxyIPHeader { + header := r.Header.Get(proxyHeader) + if len(header) > 0 { + addresses := strings.Fields(header) + if len(addresses) > 0 { + address = strings.TrimRight(addresses[0], ",") + } + } + + if len(address) > 0 { + return address + } + } + + if len(address) == 0 { + address, _, _ = net.SplitHostPort(r.RemoteAddr) + } + + return address +} + +func GetHostnameFromSiteURL(siteURL string) string { + u, err := url.Parse(siteURL) + if err != nil { + return "" + } + + return u.Hostname() +} + +type RequestCache struct { + Data []byte + Date string + Key string +} + +// Fetch JSON data from the notices server +// if skip is passed, does a fetch without touching the cache +func GetUrlWithCache(url string, cache *RequestCache, skip bool) ([]byte, error) { + // Build a GET Request, including optional If-None-Match header. + req, err := http.NewRequest("GET", url, nil) + if err != nil { + cache.Data = nil + return nil, err + } + if !skip && cache.Data != nil { + req.Header.Add("If-None-Match", cache.Key) + req.Header.Add("If-Modified-Since", cache.Date) + } + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + cache.Data = nil + return nil, err + } + defer resp.Body.Close() + // No change from latest known Etag? + if resp.StatusCode == http.StatusNotModified { + return cache.Data, nil + } + + if resp.StatusCode != 200 { + cache.Data = nil + return nil, errors.Errorf("Fetching notices failed with status code %d", resp.StatusCode) + } + + cache.Data, err = ioutil.ReadAll(resp.Body) + if err != nil { + cache.Data = nil + return nil, err + } + + // If etags headers are missing, ignore. 
+ cache.Key = resp.Header.Get("ETag") + cache.Date = resp.Header.Get("Date") + return cache.Data, err +} + +// Append tokens to passed baseUrl as query params +func AppendQueryParamsToURL(baseUrl string, params map[string]string) string { + u, err := url.Parse(baseUrl) + if err != nil { + return "" + } + q, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "" + } + for key, value := range params { + q.Add(key, value) + } + u.RawQuery = q.Encode() + return u.String() +} + +// Validates RedirectURL passed during OAuth or SAML +func IsValidWebAuthRedirectURL(config *model.Config, redirectURL string) bool { + u, err := url.Parse(redirectURL) + if err == nil && (u.Scheme == "http" || u.Scheme == "https") { + if config.ServiceSettings.SiteURL != nil { + siteUrl := *config.ServiceSettings.SiteURL + return strings.Index(strings.ToLower(redirectURL), strings.ToLower(siteUrl)) == 0 + } + return false + } + return true +} + +// Validates Mobile Custom URL Scheme passed during OAuth or SAML +func IsValidMobileAuthRedirectURL(config *model.Config, redirectURL string) bool { + for _, URLScheme := range config.NativeAppSettings.AppCustomURLSchemes { + if strings.Index(strings.ToLower(redirectURL), strings.ToLower(URLScheme)) == 0 { + return true + } + } + return false +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 0000000..7942c56 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 0000000..91b5cef --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
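
Editor's note on the utils.go file added above: the new GetUrlWithCache helper issues a GET with If-None-Match / If-Modified-Since taken from a caller-owned RequestCache and, on a 304 Not Modified response, returns the previously cached body instead of re-downloading it. The sketch below is illustrative only and is not part of this patch; the notices URL is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/utils"
)

func main() {
	// Caller-owned cache; GetUrlWithCache fills Data, Key (ETag) and Date.
	cache := &utils.RequestCache{}
	noticesURL := "https://example.com/notices.json" // placeholder URL, not from the patch

	// First fetch downloads the body and records the cache validators.
	data, err := utils.GetUrlWithCache(noticesURL, cache, false)
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Printf("fetched %d bytes\n", len(data))

	// A later fetch sends If-None-Match / If-Modified-Since; on 304 the
	// cached bytes are returned without downloading the body again.
	if data, err = utils.GetUrlWithCache(noticesURL, cache, false); err == nil {
		fmt.Printf("got %d bytes (served from cache on 304)\n", len(data))
	}
}
```

Passing skip = true omits the conditional headers, so the body is always re-downloaded and the cache fields are refreshed.
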
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 0000000..e055952 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 0000000..1f7806f --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,37 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 0000000..08cbd1e --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
+func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 0000000..41215d7 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1043 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. 
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 
157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + 
w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := 
strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n 
&& n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) 
float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 0000000..1e590b8 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,8 @@ +module github.com/mattn/go-colorable + +require ( + github.com/mattn/go-isatty v0.0.12 + golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae // indirect +) + +go 1.13 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 0000000..cf5b95d --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,5 @@ +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 0000000..012162b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go 
list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 0000000..95f2c6b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 0000000..604314d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000..65dc692 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
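
A minimal usage sketch (illustrative only, not part of the vendored files) of how the two packages vendored above are commonly combined: it uses only APIs that appear in the hunks in this patch, `NewNonColorable` from `github.com/mattn/go-colorable` and `IsTerminal` / `IsCygwinTerminal` from `github.com/mattn/go-isatty`, to decide whether ANSI color codes should reach the output.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	// Keep ANSI escape sequences when stdout is a real terminal (or a
	// Cygwin/MSYS2 pty); strip them otherwise so pipes, files, and CI
	// logs are not littered with raw escape codes.
	var out io.Writer = os.Stdout
	if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) {
		out = colorable.NewNonColorable(os.Stdout)
	}

	// Renders red on a TTY; NonColorable removes the escape sequence elsewhere.
	fmt.Fprintln(out, "\x1b[31mhello\x1b[0m")
}
```
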
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 0000000..3841835 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 0000000..17d4f90 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 0000000..605c4c2 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +go 1.12 + +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 0000000..912e29c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 0000000..012162b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 0000000..711f288 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. 
This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 0000000..ff714a3 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 0000000..c5b6e0c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 0000000..bdd5c79 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 0000000..31a1ca9 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,18 @@ +// +build linux aix +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 0000000..1fa8691 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. 
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json new file mode 100644 index 0000000..5ae9d96 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml index 928d000..cca9491 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -1,7 +1,6 @@ language: go go: - - 1.8 - 1.x - tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md index 26781bb..ee435ad 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/README.md +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -38,6 +38,14 @@ You can also call the test helper at runtime if needed: TestHelper(&testing.RuntimeT{}) } +## Versioning + +The tagged version matches the version of Go that the interface is +compatible with. For example, the version "1.14.0" is for Go 1.14 and +introduced the `Cleanup` function. The patch version (the ".0" in the +prior example) is used to fix any bugs found in this library and has no +correlation to the supported Go version. + ## Why?! **Why would I call a test helper that takes a *testing.T at runtime?** diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod index 062796d..acc65c4 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/go.mod +++ b/vendor/github.com/mitchellh/go-testing-interface/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/go-testing-interface + +go 1.14 diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go index 204afb4..8651032 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -1,5 +1,3 @@ -// +build !go1.9 - package testing import ( @@ -12,6 +10,7 @@ import ( // In unit tests you can just pass a *testing.T struct. At runtime, outside // of tests, you can pass in a RuntimeT struct from this package. type T interface { + Cleanup(func()) Error(args ...interface{}) Errorf(format string, args ...interface{}) Fail() @@ -19,9 +18,11 @@ type T interface { Failed() bool Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) + Helper() Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -31,10 +32,15 @@ type T interface { // RuntimeT implements T and can be instantiated and run at runtime to // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. 
For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. Name and Skip methods are -// unimplemented noops. +// list to determine whether to exit yourself. +// +// Cleanup does NOT work, so if you're using a helper that uses Cleanup, +// there may be dangling resources. +// +// Parallel does not do anything. type RuntimeT struct { - failed bool + skipped bool + failed bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -43,20 +49,10 @@ func (t *RuntimeT) Error(args ...interface{}) { } func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) + log.Printf(format, args...) t.Fail() } -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.FailNow() -} - func (t *RuntimeT) Fail() { t.failed = true } @@ -69,6 +65,16 @@ func (t *RuntimeT) Failed() bool { return t.failed } +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) + t.FailNow() +} + func (t *RuntimeT) Log(args ...interface{}) { log.Println(fmt.Sprintln(args...)) } @@ -77,8 +83,30 @@ func (t *RuntimeT) Logf(format string, args ...interface{}) { log.Println(fmt.Sprintf(format, args...)) } -func (t *RuntimeT) Name() string { return "" } -func (t *RuntimeT) Skip(args ...interface{}) {} -func (t *RuntimeT) SkipNow() {} -func (t *RuntimeT) Skipf(format string, args ...interface{}) {} -func (t *RuntimeT) Skipped() bool { return false } +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Parallel() {} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} + +func (t *RuntimeT) Cleanup(func()) {} diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go deleted file mode 100644 index 31b42ca..0000000 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build go1.9 - -// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition -// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC -// and is set for release shortly. We'll support this on master as the default -// as soon as 1.9 is released. - -package testing - -import ( - "fmt" - "log" -) - -// T is the interface that mimics the standard library *testing.T. -// -// In unit tests you can just pass a *testing.T struct. At runtime, outside -// of tests, you can pass in a RuntimeT struct from this package. -type T interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool - Helper() -} - -// RuntimeT implements T and can be instantiated and run at runtime to -// mimic *testing.T behavior. 
Unlike *testing.T, this will simply panic -// for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. -type RuntimeT struct { - skipped bool - failed bool -} - -func (t *RuntimeT) Error(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.Fail() -} - -func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Printf(format, args...) - t.Fail() -} - -func (t *RuntimeT) Fail() { - t.failed = true -} - -func (t *RuntimeT) FailNow() { - panic("testing.T failed, see logs for output (if any)") -} - -func (t *RuntimeT) Failed() bool { - return t.failed -} - -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Print(args...) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Printf(format, args...) - t.FailNow() -} - -func (t *RuntimeT) Log(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) -} - -func (t *RuntimeT) Logf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) -} - -func (t *RuntimeT) Name() string { - return "" -} - -func (t *RuntimeT) Skip(args ...interface{}) { - log.Print(args...) - t.SkipNow() -} - -func (t *RuntimeT) SkipNow() { - t.skipped = true -} - -func (t *RuntimeT) Skipf(format string, args ...interface{}) { - log.Printf(format, args...) - t.SkipNow() -} - -func (t *RuntimeT) Skipped() bool { - return t.skipped -} - -func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml deleted file mode 100644 index 362bdd4..0000000 --- a/vendor/github.com/oklog/run/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -sudo: false -go: - - 1.x - - tip -install: - - go get -v github.com/golang/lint/golint - - go build ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md index a7228cd..eba7d11 100644 --- a/vendor/github.com/oklog/run/README.md +++ b/vendor/github.com/oklog/run/README.md @@ -1,7 +1,7 @@ # run [![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) [![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) @@ -10,9 +10,11 @@ run.Group is a universal mechanism to manage goroutine lifecycles. Create a zero-value run.Group, and then add actors to it. Actors are defined as a pair of functions: an **execute** function, which should run synchronously; and an **interrupt** function, which, when invoked, should cause the execute -function to return. Finally, invoke Run, which blocks until the first actor -returns. This general-purpose API allows callers to model pretty much any -runnable task, and achieve well-defined lifecycle semantics for the group. +function to return. 
Finally, invoke Run, which concurrently runs all of the +actors, waits until the first actor exits, invokes the interrupt functions, and +finally returns control to the caller only once all actors have returned. This +general-purpose API allows callers to model pretty much any runnable task, and +achieve well-defined lifecycle semantics for the group. run.Group was written to manage component lifecycles in func main for [OK Log](https://github.com/oklog/oklog). diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go new file mode 100644 index 0000000..ef93495 --- /dev/null +++ b/vendor/github.com/oklog/run/actors.go @@ -0,0 +1,38 @@ +package run + +import ( + "context" + "fmt" + "os" + "os/signal" +) + +// SignalHandler returns an actor, i.e. an execute and interrupt func, that +// terminates with SignalError when the process receives one of the provided +// signals, or the parent context is canceled. +func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { + ctx, cancel := context.WithCancel(ctx) + return func() error { + c := make(chan os.Signal, 1) + signal.Notify(c, signals...) + select { + case sig := <-c: + return SignalError{Signal: sig} + case <-ctx.Done(): + return ctx.Err() + } + }, func(error) { + cancel() + } +} + +// SignalError is returned by the signal handler's execute function +// when it terminates due to a received signal. +type SignalError struct { + Signal os.Signal +} + +// Error implements the error interface. +func (e SignalError) Error() string { + return fmt.Sprintf("received signal %s", e.Signal) +} diff --git a/vendor/github.com/oklog/run/go.mod b/vendor/github.com/oklog/run/go.mod new file mode 100644 index 0000000..b129723 --- /dev/null +++ b/vendor/github.com/oklog/run/go.mod @@ -0,0 +1,3 @@ +module github.com/oklog/run + +go 1.13 diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod new file mode 100644 index 0000000..099fc7d --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.mod @@ -0,0 +1,3 @@ +module github.com/pborman/uuid + +require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum new file mode 100644 index 0000000..db2574a --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.sum @@ -0,0 +1,2 @@ +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go index 5c0960d..7286824 100644 --- a/vendor/github.com/pborman/uuid/time.go +++ b/vendor/github.com/pborman/uuid/time.go @@ -29,7 +29,7 @@ func GetTime() (Time, uint16, error) { return guuid.GetTime() } // for func ClockSequence() int { return guuid.ClockSequence() } -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to // -1 causes a new sequence to be generated. func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go index a2429b0..3370004 100644 --- a/vendor/github.com/pborman/uuid/uuid.go +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -54,9 +54,8 @@ func New() string { return NewRandom().String() } -// Parse decodes s into a UUID or returns nil. 
Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for +// the formats parsed. func Parse(s string) UUID { gu, err := guuid.Parse(s) if err == nil { diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go index b459d46..767dd0c 100644 --- a/vendor/github.com/pborman/uuid/version4.go +++ b/vendor/github.com/pborman/uuid/version4.go @@ -6,7 +6,7 @@ package uuid import guuid "github.com/google/uuid" -// Random returns a Random (Version 4) UUID or panics. +// NewRandom returns a Random (Version 4) UUID or panics. // // The strength of the UUIDs is based on the strength of the crypto/rand // package. diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml deleted file mode 100644 index abb03e9..0000000 --- a/vendor/github.com/pelletier/go-toml/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false -language: go -go: - - 1.11.x - - 1.12.x - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -env: - - GO111MODULE=on -script: - - if [ -n "$(go fmt ./...)" ]; then exit 1; fi - - go test github.com/pelletier/go-toml -race -coverprofile=coverage.txt -covermode=atomic - - go test github.com/pelletier/go-toml/cmd/tomljson - - go test github.com/pelletier/go-toml/cmd/tomll - - go test github.com/pelletier/go-toml/query - - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile index 8f439d4..fffdb01 100644 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ b/vendor/github.com/pelletier/go-toml/Dockerfile @@ -8,3 +8,4 @@ RUN go install ./... FROM scratch COPY --from=builder /go/bin/tomll /usr/bin/tomll COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 0000000..9e4503a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md index f0311b9..6831deb 100644 --- a/vendor/github.com/pelletier/go-toml/README.md +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -3,12 +3,11 @@ Go library for the [TOML](https://github.com/mojombo/toml) format. 
This library supports TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) +[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md) [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml) -[![Windows Build status](https://ci.appveyor.com/api/projects/status/4aepwwjori266hkt/branch/master?svg=true)](https://ci.appveyor.com/project/pelletier/go-toml/branch/master) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) [![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) @@ -19,7 +18,7 @@ Go-toml provides the following features for using data parsed from TOML document * Load TOML documents from files and string data * Easily navigate TOML structure using Tree -* Mashaling and unmarshaling to and from data structures +* Marshaling and unmarshaling to and from data structures * Line & column position data for all parsed elements * [Query support similar to JSON-Path](query/) * Syntax errors contain line and column numbers @@ -75,7 +74,7 @@ Or use a query: q, _ := query.Compile("$..[user,password]") results := q.Execute(config) for ii, item := range results.Values() { - fmt.Println("Query result %d: %v", ii, item) + fmt.Printf("Query result %d: %v\n", ii, item) } ``` @@ -88,7 +87,7 @@ The documentation and additional examples are available at Go-toml provides two handy command line tools: -* `tomll`: Reads TOML files and lint them. +* `tomll`: Reads TOML files and lints them. ``` go install github.com/pelletier/go-toml/cmd/tomll @@ -101,6 +100,13 @@ Go-toml provides two handy command line tools: tomljson --help ``` + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + ### Docker image Those tools are also availble as a Docker image from diff --git a/vendor/github.com/pelletier/go-toml/appveyor.yml b/vendor/github.com/pelletier/go-toml/appveyor.yml deleted file mode 100644 index 40e8a41..0000000 --- a/vendor/github.com/pelletier/go-toml/appveyor.yml +++ /dev/null @@ -1,34 +0,0 @@ -version: "{build}" - -# Source Config -clone_folder: c:\gopath\src\github.com\pelletier\go-toml - -# Build host -environment: - GOPATH: c:\gopath - DEPTESTBYPASS501: 1 - GOVERSION: 1.12 - GO111MODULE: on - -init: - - git config --global core.autocrlf input - -# Build -install: - # Install the specific Go version. 
- - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi - - msiexec /i go%GOVERSION%.windows-amd64.msi /q - - choco install bzr - - set Path=c:\go\bin;c:\gopath\bin;C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\%Path% - - go version - - go env - -build: false -deploy: false - -test_script: - - go test github.com/pelletier/go-toml - - go test github.com/pelletier/go-toml/cmd/tomljson - - go test github.com/pelletier/go-toml/cmd/tomll - - go test github.com/pelletier/go-toml/query diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 0000000..ff5376b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,230 @@ +trigger: +- master + +stages: +- stage: fuzzit + displayName: "Run Fuzzit" + dependsOn: [] + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: submit + displayName: "Submit" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: fuzzing + FUZZIT_API_KEY: $(FUZZIT_API_KEY) + +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' + - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: fuzzing + displayName: "fuzzing" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . 
${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: local-regression + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.15: + goVersion: '1.15' + imageName: 'ubuntu-latest' + mac 1.15: + goVersion: '1.15' + imageName: 'macOS-latest' + windows 1.15: + goVersion: '1.15' + imageName: 'windows-latest' + linux 1.14: + goVersion: '1.14' + imageName: 'ubuntu-latest' + mac 1.14: + goVersion: '1.14' + imageName: 'macOS-latest' + windows 1.14: + goVersion: '1.14' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' +- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.15 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' 
+ tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json deleted file mode 100644 index 86f99c6..0000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.json +++ /dev/null @@ -1,164 +0,0 @@ -{ - "array": { - "key1": [ - 1, - 2, - 3 - ], - "key2": [ - "red", - "yellow", - "green" - ], - "key3": [ - [ - 1, - 2 - ], - [ - 3, - 4, - 5 - ] - ], - "key4": [ - [ - 1, - 2 - ], - [ - "a", - "b", - "c" - ] - ], - "key5": [ - 1, - 2, - 3 - ], - "key6": [ - 1, - 2 - ] - }, - "boolean": { - "False": false, - "True": true - }, - "datetime": { - "key1": "1979-05-27T07:32:00Z", - "key2": "1979-05-27T00:32:00-07:00", - "key3": "1979-05-27T00:32:00.999999-07:00" - }, - "float": { - "both": { - "key": 6.626e-34 - }, - "exponent": { - "key1": 5e+22, - "key2": 1000000, - "key3": -0.02 - }, - "fractional": { - "key1": 1, - "key2": 3.1415, - "key3": -0.01 - }, - "underscores": { - "key1": 9224617.445991227, - "key2": 1e+100 - } - }, - "fruit": [{ - "name": "apple", - "physical": { - "color": "red", - "shape": "round" - }, - "variety": [{ - "name": "red delicious" - }, - { - "name": "granny smith" - } - ] - }, - { - "name": "banana", - "variety": [{ - "name": "plantain" - }] - } - ], - "integer": { - "key1": 99, - "key2": 42, - "key3": 0, - "key4": -17, - "underscores": { - "key1": 1000, - "key2": 5349221, - "key3": 12345 - } - }, - "products": [{ - "name": "Hammer", - "sku": 738594937 - }, - {}, - { - "color": "gray", - "name": "Nail", - "sku": 284758393 - } - ], - "string": { - "basic": { - "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." - }, - "literal": { - "multiline": { - "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", - "regex2": "I [dw]on't need \\d{2} apples" - }, - "quoted": "Tom \"Dubs\" Preston-Werner", - "regex": "\u003c\\i\\c*\\s*\u003e", - "winpath": "C:\\Users\\nodejs\\templates", - "winpath2": "\\\\ServerX\\admin$\\system32\\" - }, - "multiline": { - "continued": { - "key1": "The quick brown fox jumps over the lazy dog.", - "key2": "The quick brown fox jumps over the lazy dog.", - "key3": "The quick brown fox jumps over the lazy dog." - }, - "key1": "One\nTwo", - "key2": "One\nTwo", - "key3": "One\nTwo" - } - }, - "table": { - "inline": { - "name": { - "first": "Tom", - "last": "Preston-Werner" - }, - "point": { - "x": 1, - "y": 2 - } - }, - "key": "value", - "subtable": { - "key": "another value" - } - }, - "x": { - "y": { - "z": { - "w": {} - } - } - } -} diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh old mode 100755 new mode 100644 index 8b8bb52..a69d304 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -e +set -ex reference_ref=${1:-master} reference_git=${2:-.} @@ -8,7 +8,6 @@ reference_git=${2:-.} if ! `hash benchstat 2>/dev/null`; then echo "Installing benchstat" go get golang.org/x/perf/cmd/benchstat - go install golang.org/x/perf/cmd/benchstat fi tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` @@ -21,12 +20,16 @@ git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null pushd ${ref_tempdir} >/dev/null git checkout ${reference_ref} >/dev/null 2>/dev/null go test -bench=. -benchmem | tee ${ref_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${ref_benchmark} popd >/dev/null echo "" echo "=== local" go test -bench=. 
-benchmem | tee ${local_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${local_benchmark} echo "" echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} \ No newline at end of file +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml deleted file mode 100644 index dfd77e0..0000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.toml +++ /dev/null @@ -1,244 +0,0 @@ -################################################################################ -## Comment - -# Speak your mind with the hash symbol. They go from the symbol to the end of -# the line. - - -################################################################################ -## Table - -# Tables (also known as hash tables or dictionaries) are collections of -# key/value pairs. They appear in square brackets on a line by themselves. - -[table] - -key = "value" # Yeah, you can do this. - -# Nested tables are denoted by table names with dots in them. Name your tables -# whatever crap you please, just don't use #, ., [ or ]. - -[table.subtable] - -key = "another value" - -# You don't need to specify all the super-tables if you don't want to. TOML -# knows how to do it for you. - -# [x] you -# [x.y] don't -# [x.y.z] need these -[x.y.z.w] # for this to work - - -################################################################################ -## Inline Table - -# Inline tables provide a more compact syntax for expressing tables. They are -# especially useful for grouped data that can otherwise quickly become verbose. -# Inline tables are enclosed in curly braces `{` and `}`. No newlines are -# allowed between the curly braces unless they are valid within a value. - -[table.inline] - -name = { first = "Tom", last = "Preston-Werner" } -point = { x = 1, y = 2 } - - -################################################################################ -## String - -# There are four ways to express strings: basic, multi-line basic, literal, and -# multi-line literal. All strings must contain only valid UTF-8 characters. - -[string.basic] - -basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF." - -[string.multiline] - -# The following strings are byte-for-byte equivalent: -key1 = "One\nTwo" -key2 = """One\nTwo""" -key3 = """ -One -Two""" - -[string.multiline.continued] - -# The following strings are byte-for-byte equivalent: -key1 = "The quick brown fox jumps over the lazy dog." - -key2 = """ -The quick brown \ - - - fox jumps over \ - the lazy dog.""" - -key3 = """\ - The quick brown \ - fox jumps over \ - the lazy dog.\ - """ - -[string.literal] - -# What you see is what you get. -winpath = 'C:\Users\nodejs\templates' -winpath2 = '\\ServerX\admin$\system32\' -quoted = 'Tom "Dubs" Preston-Werner' -regex = '<\i\c*\s*>' - - -[string.literal.multiline] - -regex2 = '''I [dw]on't need \d{2} apples''' -lines = ''' -The first newline is -trimmed in raw strings. - All other whitespace - is preserved. -''' - - -################################################################################ -## Integer - -# Integers are whole numbers. Positive numbers may be prefixed with a plus sign. -# Negative numbers are prefixed with a minus sign. - -[integer] - -key1 = +99 -key2 = 42 -key3 = 0 -key4 = -17 - -[integer.underscores] - -# For large numbers, you may use underscores to enhance readability. Each -# underscore must be surrounded by at least one digit. 
-key1 = 1_000 -key2 = 5_349_221 -key3 = 1_2_3_4_5 # valid but inadvisable - - -################################################################################ -## Float - -# A float consists of an integer part (which may be prefixed with a plus or -# minus sign) followed by a fractional part and/or an exponent part. - -[float.fractional] - -key1 = +1.0 -key2 = 3.1415 -key3 = -0.01 - -[float.exponent] - -key1 = 5e+22 -key2 = 1e6 -key3 = -2E-2 - -[float.both] - -key = 6.626e-34 - -[float.underscores] - -key1 = 9_224_617.445_991_228_313 -key2 = 1e1_00 - - -################################################################################ -## Boolean - -# Booleans are just the tokens you're used to. Always lowercase. - -[boolean] - -True = true -False = false - - -################################################################################ -## Datetime - -# Datetimes are RFC 3339 dates. - -[datetime] - -key1 = 1979-05-27T07:32:00Z -key2 = 1979-05-27T00:32:00-07:00 -key3 = 1979-05-27T00:32:00.999999-07:00 - - -################################################################################ -## Array - -# Arrays are square brackets with other primitives inside. Whitespace is -# ignored. Elements are separated by commas. Data types may not be mixed. - -[array] - -key1 = [ 1, 2, 3 ] -key2 = [ "red", "yellow", "green" ] -key3 = [ [ 1, 2 ], [3, 4, 5] ] -#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok - -# Arrays can also be multiline. So in addition to ignoring whitespace, arrays -# also ignore newlines between the brackets. Terminating commas are ok before -# the closing bracket. - -key5 = [ - 1, 2, 3 -] -key6 = [ - 1, - 2, # this is ok -] - - -################################################################################ -## Array of Tables - -# These can be expressed by using a table name in double brackets. Each table -# with the same double bracketed name will be an element in the array. The -# tables are inserted in the order encountered. - -[[products]] - -name = "Hammer" -sku = 738594937 - -[[products]] - -[[products]] - -name = "Nail" -sku = 284758393 -color = "gray" - - -# You can create nested arrays of tables as well. 
- -[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - -[[fruit]] - name = "banana" - - [[fruit.variety]] - name = "plantain" diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml deleted file mode 100644 index 0bd19f0..0000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.yml +++ /dev/null @@ -1,121 +0,0 @@ ---- -array: - key1: - - 1 - - 2 - - 3 - key2: - - red - - yellow - - green - key3: - - - 1 - - 2 - - - 3 - - 4 - - 5 - key4: - - - 1 - - 2 - - - a - - b - - c - key5: - - 1 - - 2 - - 3 - key6: - - 1 - - 2 -boolean: - 'False': false - 'True': true -datetime: - key1: '1979-05-27T07:32:00Z' - key2: '1979-05-27T00:32:00-07:00' - key3: '1979-05-27T00:32:00.999999-07:00' -float: - both: - key: 6.626e-34 - exponent: - key1: 5.0e+22 - key2: 1000000 - key3: -0.02 - fractional: - key1: 1 - key2: 3.1415 - key3: -0.01 - underscores: - key1: 9224617.445991227 - key2: 1.0e+100 -fruit: -- name: apple - physical: - color: red - shape: round - variety: - - name: red delicious - - name: granny smith -- name: banana - variety: - - name: plantain -integer: - key1: 99 - key2: 42 - key3: 0 - key4: -17 - underscores: - key1: 1000 - key2: 5349221 - key3: 12345 -products: -- name: Hammer - sku: 738594937 -- {} -- color: gray - name: Nail - sku: 284758393 -string: - basic: - basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." - literal: - multiline: - lines: | - The first newline is - trimmed in raw strings. - All other whitespace - is preserved. - regex2: I [dw]on't need \d{2} apples - quoted: Tom "Dubs" Preston-Werner - regex: "<\\i\\c*\\s*>" - winpath: C:\Users\nodejs\templates - winpath2: "\\\\ServerX\\admin$\\system32\\" - multiline: - continued: - key1: The quick brown fox jumps over the lazy dog. - key2: The quick brown fox jumps over the lazy dog. - key3: The quick brown fox jumps over the lazy dog. - key1: |- - One - Two - key2: |- - One - Two - key3: |- - One - Two -table: - inline: - name: - first: Tom - last: Preston-Werner - point: - x: 1 - y: 2 - key: value - subtable: - key: another value -x: - y: - z: - w: {} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go index d5fd98c..a1406a3 100644 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -1,7 +1,7 @@ // Package toml is a TOML parser and manipulation library. 
// // This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md // // Marshaling // diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml index 12950a1..780d9c6 100644 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -27,3 +27,4 @@ enabled = true [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml index 3d902f2..f45bf88 100644 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -27,3 +27,4 @@ enabled = true [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh old mode 100755 new mode 100644 diff --git a/vendor/github.com/pelletier/go-toml/fuzzit.sh b/vendor/github.com/pelletier/go-toml/fuzzit.sh new file mode 100644 index 0000000..b575a60 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzzit.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xe + +# go-fuzz doesn't support modules yet, so ensure we do everything +# in the old style GOPATH way +export GO111MODULE="off" + +# install go-fuzz +go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + +# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) +# to add another target, make sure to create it with `fuzzit create target` +# before using `fuzzit create job` +TARGET=toml-fuzzer + +go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml +clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} + +# install fuzzit for talking to fuzzit.dev service +# or latest version: +# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 +wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 +chmod a+x fuzzit + +# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there +./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod index f4690e1..e924cb9 100644 --- a/vendor/github.com/pelletier/go-toml/go.mod +++ b/vendor/github.com/pelletier/go-toml/go.mod @@ -2,8 +2,4 @@ module github.com/pelletier/go-toml go 1.12 -require ( - github.com/BurntSushi/toml v0.3.1 - github.com/davecgh/go-spew v1.1.1 - gopkg.in/yaml.v2 v2.2.2 -) +require github.com/davecgh/go-spew v1.1.1 diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum index 8d91a47..6f35647 100644 --- a/vendor/github.com/pelletier/go-toml/go.sum +++ b/vendor/github.com/pelletier/go-toml/go.sum @@ -5,3 +5,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go index e923bc4..e091500 100644 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -5,7 +5,6 @@ package toml import ( "errors" "fmt" - "unicode" ) // Convert the bare key group string to an array. @@ -109,5 +108,5 @@ func parseKey(key string) ([]string, error) { } func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) + return isAlphanumeric(r) || r == '-' || isDigit(r) } diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go index 6254d39..b188619 100644 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -26,7 +26,7 @@ type tomlLexer struct { currentTokenStart int currentTokenStop int tokens []token - depth int + brackets []rune line int col int endbufferLine int @@ -123,6 +123,8 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn { for { next := l.peek() switch next { + case '}': // after '{' + return l.lexRightCurlyBrace case '[': return l.lexTableKey case '#': @@ -140,10 +142,6 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn { l.skip() } - if l.depth > 0 { - return l.lexRvalue - } - if isKeyStartChar(next) { return l.lexKey } @@ -167,10 +165,8 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { case '=': return l.lexEqual case '[': - l.depth++ return l.lexLeftBracket case ']': - l.depth-- return l.lexRightBracket case '{': return l.lexLeftCurlyBrace @@ -188,12 +184,10 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { fallthrough case '\n': l.skip() - if l.depth == 0 { - return l.lexVoid + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue } - return l.lexRvalue - case '_': - return l.errorf("cannot start number with underscore") + return l.lexVoid } if l.follow("true") { @@ -223,9 +217,12 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { } possibleDate := l.peekString(35) - dateMatch := dateRegexp.FindString(possibleDate) - if dateMatch != "" { - l.fastForward(len(dateMatch)) + dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) + if dateSubmatches != nil && dateSubmatches[0] != "" { + l.fastForward(len(dateSubmatches[0])) + if dateSubmatches[2] == "" { // no timezone information => local date + return l.lexLocalDate + } return l.lexDate } @@ -233,10 +230,6 @@ func (l *tomlLexer) lexRvalue() 
tomlLexStateFn { return l.lexNumber } - if isAlphanumeric(next) { - return l.lexKey - } - return l.errorf("no value can start with %c", next) } @@ -247,12 +240,17 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenLeftCurlyBrace) - return l.lexRvalue + l.brackets = append(l.brackets, '{') + return l.lexVoid } func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] return l.lexRvalue } @@ -261,6 +259,11 @@ func (l *tomlLexer) lexDate() tomlLexStateFn { return l.lexRvalue } +func (l *tomlLexer) lexLocalDate() tomlLexStateFn { + l.emit(tokenLocalDate) + return l.lexRvalue +} + func (l *tomlLexer) lexTrue() tomlLexStateFn { l.fastForward(4) l.emit(tokenTrue) @@ -294,13 +297,16 @@ func (l *tomlLexer) lexEqual() tomlLexStateFn { func (l *tomlLexer) lexComma() tomlLexStateFn { l.next() l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } return l.lexRvalue } // Parse the key and emits its value without escape sequences. // bare keys, basic string keys and literal string keys are supported. func (l *tomlLexer) lexKey() tomlLexStateFn { - growingString := "" + var sb strings.Builder for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { if r == '"' { @@ -309,7 +315,9 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += "\"" + str + "\"" + sb.WriteString("\"") + sb.WriteString(str) + sb.WriteString("\"") l.next() continue } else if r == '\'' { @@ -318,22 +326,45 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += "'" + str + "'" + sb.WriteString("'") + sb.WriteString(str) + sb.WriteString("'") l.next() continue } else if r == '\n' { return l.errorf("keys cannot contain new lines") } else if isSpace(r) { - break + var str strings.Builder + str.WriteString(" ") + + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str.WriteString(".") + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + sb.WriteString(str.String()) + continue } else if r == '.' 
{ // skip } else if !isValidBareChar(r) { return l.errorf("keys cannot contain %c character", r) } - growingString += string(r) + sb.WriteRune(r) l.next() } - l.emitWithValue(tokenKey, growingString) + l.emitWithValue(tokenKey, sb.String()) return l.lexVoid } @@ -353,11 +384,12 @@ func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { l.next() l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') return l.lexRvalue } func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - growingString := "" + var sb strings.Builder if discardLeadingNewLine { if l.follow("\r\n") { @@ -371,14 +403,14 @@ func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNe // find end of string for { if l.follow(terminator) { - return growingString, nil + return sb.String(), nil } next := l.peek() if next == eof { break } - growingString += string(l.next()) + sb.WriteRune(l.next()) } return "", errors.New("unclosed string") @@ -412,7 +444,7 @@ func (l *tomlLexer) lexLiteralString() tomlLexStateFn { // Terminator is the substring indicating the end of the token. // The resulting string does not include the terminator. func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - growingString := "" + var sb strings.Builder if discardLeadingNewLine { if l.follow("\r\n") { @@ -425,7 +457,7 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, for { if l.follow(terminator) { - return growingString, nil + return sb.String(), nil } if l.follow("\\") { @@ -443,72 +475,72 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, l.next() } case '"': - growingString += "\"" + sb.WriteString("\"") l.next() case 'n': - growingString += "\n" + sb.WriteString("\n") l.next() case 'b': - growingString += "\b" + sb.WriteString("\b") l.next() case 'f': - growingString += "\f" + sb.WriteString("\f") l.next() case '/': - growingString += "/" + sb.WriteString("/") l.next() case 't': - growingString += "\t" + sb.WriteString("\t") l.next() case 'r': - growingString += "\r" + sb.WriteString("\r") l.next() case '\\': - growingString += "\\" + sb.WriteString("\\") l.next() case 'u': l.next() - code := "" + var code strings.Builder for i := 0; i < 4; i++ { c := l.peek() if !isHexDigit(c) { return "", errors.New("unfinished unicode escape") } l.next() - code = code + string(c) + code.WriteRune(c) } - intcode, err := strconv.ParseInt(code, 16, 32) + intcode, err := strconv.ParseInt(code.String(), 16, 32) if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code) + return "", errors.New("invalid unicode escape: \\u" + code.String()) } - growingString += string(rune(intcode)) + sb.WriteRune(rune(intcode)) case 'U': l.next() - code := "" + var code strings.Builder for i := 0; i < 8; i++ { c := l.peek() if !isHexDigit(c) { return "", errors.New("unfinished unicode escape") } l.next() - code = code + string(c) + code.WriteRune(c) } - intcode, err := strconv.ParseInt(code, 16, 64) + intcode, err := strconv.ParseInt(code.String(), 16, 64) if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code) + return "", errors.New("invalid unicode escape: \\U" + code.String()) } - growingString += string(rune(intcode)) + sb.WriteRune(rune(intcode)) default: return "", errors.New("invalid escape sequence: \\" + string(l.peek())) } } else { r := l.peek() - 
if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { return "", fmt.Errorf("unescaped control character %U", r) } l.next() - growingString += string(r) + sb.WriteRune(r) } if l.peek() == eof { @@ -535,7 +567,6 @@ func (l *tomlLexer) lexString() tomlLexStateFn { } str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { return l.errorf(err.Error()) } @@ -607,6 +638,10 @@ func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { func (l *tomlLexer) lexRightBracket() tomlLexStateFn { l.next() l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] return l.lexRvalue } @@ -733,7 +768,27 @@ func (l *tomlLexer) run() { } func init() { - dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) + // Regexp for all date/time formats supported by TOML. + // Group 1: nano precision + // Group 2: timezone + // + // /!\ also matches the empty string + // + // Example matches: + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + // 07:32:00 + // 00:32:00.999999 + dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) } // Entry point diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 0000000..a2149e9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,281 @@ +// Implementation of TOML's local date/time. +// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go +// to avoid pulling all the Google dependencies. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. 
+} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.LocalDate, even when time.LocalDate returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d LocalDate) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d LocalDate) AddDays(n int) LocalDate { + return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d LocalDate) DaysSince(s LocalDate) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 LocalDate) Before(d2 LocalDate) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 LocalDate) After(d2 LocalDate) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseLocalDate. +func (d *LocalDate) UnmarshalText(data []byte) error { + var err error + *d, err = ParseLocalDate(string(data)) + return err +} + +// A LocalTime represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. 
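Not part of the diff: a minimal sketch of the LocalDate type added in this new localtime.go, assuming the vendored package is imported as toml "github.com/pelletier/go-toml".

    package main

    import (
    	"fmt"
    	"time"

    	toml "github.com/pelletier/go-toml"
    )

    func main() {
    	// ParseLocalDate accepts the RFC 3339 full-date form, with no time or offset.
    	d, err := toml.ParseLocalDate("1979-05-27")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(d)              // 1979-05-27
    	fmt.Println(d.AddDays(5))   // 1979-06-01
    	fmt.Println(d.In(time.UTC)) // 1979-05-27 00:00:00 +0000 UTC
    	fmt.Println(d.Before(toml.LocalDateOf(time.Now())))
    }
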
+type LocalTime struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func LocalTimeOf(t time.Time) LocalTime { + var tm LocalTime + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseLocalTime parses a string and returns the time value it represents. +// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseLocalTime(s string) (LocalTime, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return LocalTime{}, err + } + return LocalTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t LocalTime) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return LocalTimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t LocalTime) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseLocalTime. +func (t *LocalTime) UnmarshalText(data []byte) error { + var err error + *t, err = ParseLocalTime(string(data)) + return err +} + +// A LocalDateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type LocalDateTime struct { + Date LocalDate + Time LocalTime +} + +// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. + +// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. +func LocalDateTimeOf(t time.Time) LocalDateTime { + return LocalDateTime{ + Date: LocalDateOf(t), + Time: LocalTimeOf(t), + } +} + +// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. +// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseLocalTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. 
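Similarly for LocalTime (sketch, not part of the diff): fractional seconds are kept to nanosecond precision, and String prints nine fractional digits whenever Nanosecond is non-zero.

    package main

    import (
    	"fmt"

    	toml "github.com/pelletier/go-toml"
    )

    func main() {
    	t, err := toml.ParseLocalTime("07:32:00.999999")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(t.Hour, t.Minute, t.Second) // 7 32 0
    	fmt.Println(t)                          // 07:32:00.999999000
    }
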
+func ParseLocalDateTime(s string) (LocalDateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return LocalDateTime{}, err + } + } + return LocalDateTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalDate. +func (dt LocalDateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt LocalDateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the LocalDateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then +// both +// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.LocalDateTime{ +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, +// civil.LocalTime{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt LocalDateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt LocalDateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
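And for LocalDateTime, which combines the two and only becomes a time.Time once a concrete location is supplied (again a sketch outside the diff):

    package main

    import (
    	"fmt"
    	"time"

    	toml "github.com/pelletier/go-toml"
    )

    func main() {
    	dt, err := toml.ParseLocalDateTime("1979-05-27T07:32:00")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(dt) // 1979-05-27T07:32:00
    	// Interpreting the same wall-clock time in a location yields a time.Time.
    	loc, err := time.LoadLocation("America/New_York")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(dt.In(loc))
    }
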
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go index 0e1c57e..032e0ff 100644 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -2,6 +2,7 @@ package toml import ( "bytes" + "encoding" "errors" "fmt" "io" @@ -22,6 +23,7 @@ const ( type tomlOpts struct { name string + nameFromTag bool comment string commented bool multiline bool @@ -68,6 +70,13 @@ const ( var timeType = reflect.TypeOf(time.Time{}) var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) // Check if the given marshal type maps to a Tree primitive func isPrimitive(mtype reflect.Type) bool { @@ -85,29 +94,59 @@ func isPrimitive(mtype reflect.Type) bool { case reflect.String: return true case reflect.Struct: - return mtype == timeType || isCustomMarshaler(mtype) + return isTimeType(mtype) default: return false } } -// Check if the given marshal type maps to a Tree slice -func isTreeSlice(mtype reflect.Type) bool { +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { switch mtype.Kind() { - case reflect.Slice: - return !isOtherSlice(mtype) + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) default: return false } } -// Check if the given marshal type maps to a non-Tree slice -func isOtherSlice(mtype reflect.Type) bool { +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { switch mtype.Kind() { case reflect.Ptr: - return isOtherSlice(mtype.Elem()) - case reflect.Slice: - return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) + return isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) default: return false } @@ -116,6 +155,8 @@ func isOtherSlice(mtype 
reflect.Type) bool { // Check if the given marshal type maps to a Tree func isTree(mtype reflect.Type) bool { switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) case reflect.Map: return true case reflect.Struct: @@ -133,12 +174,42 @@ func callCustomMarshaler(mval reflect.Value) ([]byte, error) { return mval.Interface().(Marshaler).MarshalTOML() } +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { + return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) +} + +func isTextUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(textUnmarshalerType) +} + +func callTextUnmarshaler(mval reflect.Value, text []byte) error { + return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) +} + // Marshaler is the interface implemented by types that // can marshal themselves into valid TOML. type Marshaler interface { MarshalTOML() ([]byte, error) } +// Unmarshaler is the interface implemented by types that +// can unmarshal a TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + /* Marshal returns the TOML encoding of v. Behavior is similar to the Go json encoder, except that there is no concept of a Marshaler interface or MarshalTOML @@ -170,7 +241,7 @@ Tree primitive types and corresponding marshal types: float64 float32, float64, pointers to same string string, pointers to same bool bool, pointers to same - time.Time time.Time{}, pointers to same + time.LocalTime time.LocalTime{}, pointers to same For additional flexibility, use the Encoder API. */ @@ -183,20 +254,23 @@ type Encoder struct { w io.Writer encOpts annotation - line int - col int - order marshalOrder + line int + col int + order marshalOrder + promoteAnon bool + indentation string } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, + w: w, + encOpts: encOptsDefaults, + annotation: annotationDefault, + line: 0, + col: 1, + order: OrderAlphabetical, + indentation: " ", } } @@ -248,6 +322,12 @@ func (e *Encoder) Order(ord marshalOrder) *Encoder { return e } +// Indentation allows to change indentation when marshalling. +func (e *Encoder) Indentation(indent string) *Encoder { + e.indentation = indent + return e +} + // SetTagName allows changing default tag "toml" func (e *Encoder) SetTagName(v string) *Encoder { e.tag = v @@ -272,8 +352,31 @@ func (e *Encoder) SetTagMultiline(v string) *Encoder { return e } +// PromoteAnonymous allows to change how anonymous struct fields are marshaled. +// Usually, they are marshaled as if the inner exported fields were fields in +// the outer struct. However, if an anonymous struct field is given a name in +// its TOML tag, it is treated like a regular struct field with that name. +// rather than being anonymous. +// +// In case anonymous promotion is enabled, all anonymous structs are promoted +// and treated like regular struct fields. 
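The encoder gains new knobs in this hunk (Indentation, PromoteAnonymous). A sketch of wiring them together, outside the diff; the Config type is made up for illustration:

    package main

    import (
    	"bytes"
    	"fmt"

    	toml "github.com/pelletier/go-toml"
    )

    type Config struct {
    	Name  string `toml:"name"`
    	Ports []int  `toml:"ports"`
    }

    func main() {
    	var buf bytes.Buffer
    	enc := toml.NewEncoder(&buf).
    		Indentation("  ").     // new: only space and tab characters are accepted
    		PromoteAnonymous(true) // new: anonymous structs become named sub-tables
    	if err := enc.Encode(Config{Name: "demo", Ports: []int{8001, 8002}}); err != nil {
    		panic(err)
    	}
    	fmt.Print(buf.String())
    }
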
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { + e.promoteAnon = promote + return e +} + func (e *Encoder) marshal(v interface{}) ([]byte, error) { + // Check if indentation is valid + for _, char := range e.indentation { + if !isSpace(char) { + return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") + } + } + mtype := reflect.TypeOf(v) + if mtype == nil { + return []byte{}, errors.New("nil cannot be marshaled to TOML") + } switch mtype.Kind() { case reflect.Struct, reflect.Map: @@ -281,6 +384,9 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) { if mtype.Elem().Kind() != reflect.Struct { return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") } + if reflect.ValueOf(v).IsNil() { + return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") + } default: return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") } @@ -289,13 +395,16 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) { if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) } + if isTextMarshaler(mtype) { + return callTextMarshaler(sval) + } t, err := e.valueToTree(mtype, sval) if err != nil { return []byte{}, err } var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order) + _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false) return buf.Bytes(), err } @@ -313,20 +422,29 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er tval := e.nextTree() switch mtype.Kind() { case reflect.Struct: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) - if opts.include && (!opts.omitempty || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err + switch mval.Interface().(type) { + case Tree: + reflect.ValueOf(tval).Elem().Set(mval) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := tomlOptions(mtypef, e.annotation) + if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { + e.appendTree(tval, tree) + } else { + val = e.wrapTomlValue(val, tval) + tval.SetPathWithOptions([]string{opts.name}, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + }, val) + } } - - tval.SetWithOptions(opts.name, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - }, val) } } case reflect.Map: @@ -351,18 +469,22 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er } for _, key := range keys { mvalf := mval.MapIndex(key) + if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { + continue + } val, err := e.valueToToml(mtype.Elem(), mvalf) if err != nil { return nil, err } + val = e.wrapTomlValue(val, tval) if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) if err != nil { return nil, err } tval.SetPath([]string{keyStr}, val) } else { - tval.Set(key.String(), val) + 
tval.SetPath([]string{key.String()}, val) } } } @@ -397,19 +519,32 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int // Convert given marshal value to toml value func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - e.line++ if mtype.Kind() == reflect.Ptr { - return e.valueToToml(mtype.Elem(), mval.Elem()) + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + } + if mtype.Kind() == reflect.Interface { + return e.valueToToml(mval.Elem().Type(), mval.Elem()) } switch { case isCustomMarshaler(mtype): return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err case isTree(mtype): return e.valueToTree(mtype, mval) - case isTreeSlice(mtype): - return e.valueToTreeSlice(mtype, mval) - case isOtherSlice(mtype): + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): return e.valueToOtherSlice(mtype, mval) + case isTreeSequence(mtype): + return e.valueToTreeSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: @@ -426,13 +561,45 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface case reflect.String: return mval.String(), nil case reflect.Struct: - return mval.Interface().(time.Time), nil + return mval.Interface(), nil default: return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) } } } +func (e *Encoder) appendTree(t, o *Tree) error { + for key, value := range o.values { + if _, ok := t.values[key]; ok { + continue + } + if tomlValue, ok := value.(*tomlValue); ok { + tomlValue.position.Col = t.position.Col + } + t.values[key] = value + } + return nil +} + +// Create a toml value with the current line number as the position line +func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { + _, isTree := val.(*Tree) + _, isTreeS := val.([]*Tree) + if isTree || isTreeS { + return val + } + + ret := &tomlValue{ + value: val, + position: Position{ + e.line, + parent.position.Col, + }, + } + e.line++ + return ret +} + // Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. @@ -445,8 +612,11 @@ func (t *Tree) Unmarshal(v interface{}) error { // See Marshal() documentation for types mapping table. func (t *Tree) Marshal() ([]byte, error) { var buf bytes.Buffer - err := NewEncoder(&buf).Encode(t) - return buf.Bytes(), err + _, err := t.WriteTo(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil } // Unmarshal parses the TOML-encoded data and stores the result in the value @@ -482,6 +652,8 @@ type Decoder struct { tval *Tree encOpts tagName string + strict bool + visitor visitorState } // NewDecoder returns a new decoder that reads from r. @@ -512,8 +684,18 @@ func (d *Decoder) SetTagName(v string) *Decoder { return d } +// Strict allows changing to strict decoding. Any fields that are found in the +// input data and do not have a corresponding struct member cause an error. 
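Strict mode is backed by the visitorState added at the end of this file: any key that is never visited during decoding is reported. A sketch outside the diff, with an illustrative struct:

    package main

    import (
    	"fmt"
    	"strings"

    	toml "github.com/pelletier/go-toml"
    )

    type ServerConfig struct {
    	Host string `toml:"host"`
    }

    func main() {
    	doc := "host = \"localhost\"\nport = 8080\n" // "port" has no matching struct field
    	var c ServerConfig
    	err := toml.NewDecoder(strings.NewReader(doc)).Strict(true).Decode(&c)
    	fmt.Println(err) // undecoded keys: ["port"]
    }
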
+func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + func (d *Decoder) unmarshal(v interface{}) error { mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } if mtype.Kind() != reflect.Ptr { return errors.New("only a pointer to struct or map can be unmarshaled from TOML") } @@ -522,32 +704,74 @@ func (d *Decoder) unmarshal(v interface{}) error { switch elem.Kind() { case reflect.Struct, reflect.Map: + case reflect.Interface: + elem = mapStringInterfaceType default: return errors.New("only a pointer to struct or map can be unmarshaled from TOML") } - sval, err := d.valueFromTree(elem, d.tval) + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) + } + + sval, err := d.valueFromTree(elem, d.tval, &vv) if err != nil { return err } + if err := d.visitor.validate(); err != nil { + return err + } reflect.ValueOf(v).Elem().Set(sval) return nil } -// Convert toml tree to marshal struct or map, using marshal type -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. + if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if tval == nil { + return mvalPtr.Elem(), nil + } + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil } + var mval reflect.Value switch mtype.Kind() { case reflect.Struct: - mval = reflect.New(mtype).Elem() - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if opts.include { + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } baseKey := opts.name keysToTry := []string{ baseKey, @@ -557,19 +781,25 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, } found := false - for _, key := range keysToTry { - exists := tval.Has(key) - if !exists { - continue - } - val := tval.Get(key) - mvalf, err := d.valueFromToml(mtypef.Type, val) - if err != nil { - return mval, formatError(err, tval.GetPosition(key)) + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break } - mval.Field(i).Set(mvalf) - found = true - break } if !found && 
opts.defaultValue != "" { @@ -577,45 +807,71 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, var val interface{} var err error switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue case reflect.Bool: val, err = strconv.ParseBool(opts.defaultValue) - if err != nil { - return mval.Field(i), err - } + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) case reflect.Int: - val, err = strconv.Atoi(opts.defaultValue) - if err != nil { - return mval.Field(i), err - } - case reflect.String: - val = opts.defaultValue + val, err = strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) case reflect.Int64: val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - if err != nil { - return mval.Field(i), err - } + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) case reflect.Float64: val, err = strconv.ParseFloat(opts.defaultValue, 64) - if err != nil { - return mval.Field(i), err - } default: - return mval.Field(i), fmt.Errorf("unsuported field type for default option") + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval + if !mtypef.Anonymous { + tmpTval = nil + } + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) + if err != nil { + return v, err } - mval.Field(i).Set(reflect.ValueOf(val)) + mval.Field(i).Set(v) } } } case reflect.Map: mval = reflect.MakeMap(mtype) for _, key := range tval.Keys() { + d.visitor.push(key) // TODO: path splits key val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) if err != nil { - return mval, formatError(err, tval.GetPosition(key)) + return mval, formatError(err, tval.GetPositionPath([]string{key})) } mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() } } return mval, nil @@ -623,22 +879,32 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, // Convert toml value to marshal struct/map slice, using marshal type func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i]) + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } mval.Index(i).Set(val) + d.visitor.pop() } return mval, nil } // Convert toml value to marshal primitive slice, using marshal type func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) 
(reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i]) + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } @@ -647,33 +913,133 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r return mval, nil } -// Convert toml value to marshal value, using marshal type -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. 
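makeSliceOrArray above means a TOML array can now be decoded into a fixed-size Go array as well as a slice, with oversized input rejected. Sketch (outside the diff, illustrative types):

    package main

    import (
    	"fmt"

    	toml "github.com/pelletier/go-toml"
    )

    type Limits struct {
    	Ports [4]int `toml:"ports"`
    }

    func main() {
    	var l Limits
    	if err := toml.Unmarshal([]byte(`ports = [8001, 8002, 8003]`), &l); err != nil {
    		panic(err)
    	}
    	fmt.Println(l.Ports) // [8001 8002 8003 0] -- unused elements keep their zero value

    	// A fifth element fails: the error reports that the TOML array length (5)
    	// exceeds the destination array length (4).
    	err := toml.Unmarshal([]byte(`ports = [1, 2, 3, 4, 5]`), &l)
    	fmt.Println(err)
    }
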
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval, mval1) } switch t := tval.(type) { case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + if isTree(mtype) { - return d.valueFromTree(mtype, t) + return d.valueFromTree(mtype, t, mval11) } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) case []*Tree: - if isTreeSlice(mtype) { + if isTreeSequence(mtype) { return d.valueFromTreeSlice(mtype, t) } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) case []interface{}: - if isOtherSlice(mtype) { + d.visitor.visit() + if isOtherSequence(mtype) { return d.valueFromOtherSlice(mtype, t) } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) default: + d.visitor.visit() + // Check if pointer to value implements the encoding.TextUnmarshaler. 
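Together with the mapStringInterfaceType fallback in unmarshal above, these interface branches let a document be decoded without a target schema; nested tables come back as map[string]interface{}. Sketch, outside the diff:

    package main

    import (
    	"fmt"

    	toml "github.com/pelletier/go-toml"
    )

    func main() {
    	doc := []byte("[server]\nhost = \"localhost\"\nport = 8080\n")
    	var v interface{}
    	if err := toml.Unmarshal(doc, &v); err != nil {
    		panic(err)
    	}
    	server := v.(map[string]interface{})["server"].(map[string]interface{})
    	fmt.Println(server["host"], server["port"]) // localhost 8080
    }
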
+ if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + switch mtype.Kind() { case reflect.Bool, reflect.Struct: val := reflect.ValueOf(tval) - // if this passes for when mtype is reflect.Struct, tval is a time.Time + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } @@ -696,46 +1062,65 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V } return reflect.ValueOf(d), nil } - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(mtype).Int()) { + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Convert(mtype).Uint())) { + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil case reflect.Float32, reflect.Float64: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(mtype).Float()) { + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case 
reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) default: return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) } } } -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := d.valueFromToml(mtype.Elem(), tval) +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) if err != nil { return reflect.ValueOf(nil), err } @@ -744,6 +1129,12 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.V return mval, nil } +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} + func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { tag := vf.Tag.Get(an.tag) parse := strings.Split(tag, ",") @@ -756,6 +1147,7 @@ func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { defaultValue := vf.Tag.Get(tagDefault) result := tomlOpts{ name: vf.Name, + nameFromTag: false, comment: comment, commented: commented, multiline: multiline, @@ -768,6 +1160,7 @@ func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { result.include = false } else { result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true } } if vf.PkgPath != "" { @@ -784,11 +1177,7 @@ func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { func isZero(val reflect.Value) bool { switch val.Type().Kind() { - case reflect.Map: - fallthrough - case reflect.Array: - fallthrough - case reflect.Slice: + case reflect.Slice, reflect.Array, reflect.Map: return val.Len() == 0 default: return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) @@ -801,3 +1190,80 @@ func formatError(err error, pos Position) error { } return fmt.Errorf("%s: %s", pos, err) } + +// visitorState keeps track of which keys were unmarshaled. 
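Because the decoder now falls back to encoding.TextUnmarshaler (isTextUnmarshaler and unmarshalText above), string values can populate standard types such as net.IP directly. A sketch outside the diff; the struct is illustrative and the behaviour leans on net.IP's own UnmarshalText:

    package main

    import (
    	"fmt"
    	"net"

    	toml "github.com/pelletier/go-toml"
    )

    type Listener struct {
    	Addr net.IP `toml:"addr"`
    }

    func main() {
    	var l Listener
    	if err := toml.Unmarshal([]byte(`addr = "192.168.1.10"`), &l); err != nil {
    		panic(err)
    	}
    	fmt.Println(l.Addr) // 192.168.1.10
    }
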
+type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml deleted file mode 100644 index a3bd513..0000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml +++ /dev/null @@ -1,17 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_map] - one = "one" - two = "two" - -[long_map] - a7 = "1" - b3 = "2" - c8 = "3" - d4 = "4" - e6 = "5" - f5 = "6" - g10 = "7" - h1 = "8" - i2 = "9" - j9 = "10" diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml index 9d68b59..792b72e 100644 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -27,6 +27,7 @@ title = "TOML Marshal Testing" uint = 5001 bool = true float = 123.4 + float64 = 123.456782132399 int = 5000 string = "Bite me" date = 1979-05-27T07:32:00Z diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml index 1c5f98e..ba5e110 100644 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -4,6 +4,7 @@ title = "TOML Marshal Testing" bool = true date = 1979-05-27T07:32:00Z float = 123.4 + float64 = 123.456782132399 int = 5000 string = "Bite me" uint = 5001 diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go index a7498e4..7bf40bb 100644 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -158,6 +158,11 @@ func (p *tomlParser) parseGroup() tomlParserStateFn { if err := p.tree.createSubTree(keys, startToken.Position); err != nil { p.raiseError(key, "%s", err) } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define exist inline table or its 
sub-table : %s", + strings.Join(keys, ".")) + } p.assume(tokenRightBracket) p.currentTable = keys return p.parseStart @@ -201,6 +206,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { strings.Join(tableKey, ".")) } + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + // assign value to the found table keyVal := parsedKey[len(parsedKey)-1] localKey := []string{keyVal} @@ -313,7 +323,41 @@ func (p *tomlParser) parseRvalue() interface{} { } return val case tokenDate: - val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) + layout := time.RFC3339Nano + if !strings.Contains(tok.val, "T") { + layout = strings.Replace(layout, "T", " ", 1) + } + val, err := time.ParseInLocation(layout, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + v := strings.Replace(tok.val, " ", "T", -1) + isDateTime := false + isTime := false + for _, c := range v { + if c == 'T' || c == 't' { + isDateTime = true + break + } + if c == ':' { + isTime = true + break + } + } + + var val interface{} + var err error + + if isDateTime { + val, err = ParseLocalDateTime(v) + } else if isTime { + val, err = ParseLocalTime(v) + } else { + val, err = ParseLocalDate(v) + } + if err != nil { p.raiseError(tok, "%s", err) } @@ -356,12 +400,15 @@ Loop: } key := p.getToken() p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + value := p.parseRvalue() - tree.Set(key.val, value) + tree.SetPath(parsedKey, value) case tokenComma: - if previous == nil { - p.raiseError(follow, "inline table cannot start with a comma") - } if tokenIsComma(previous) { p.raiseError(follow, "need field between two commas in inline table") } @@ -374,12 +421,13 @@ Loop: if tokenIsComma(previous) { p.raiseError(previous, "trailing comma at the end of inline table") } + tree.inline = true return tree } func (p *tomlParser) parseArray() interface{} { var array []interface{} - arrayType := reflect.TypeOf(nil) + arrayType := reflect.TypeOf(newTree()) for { follow := p.peek() if follow == nil || follow.typ == tokenEOF { @@ -390,11 +438,8 @@ func (p *tomlParser) parseArray() interface{} { break } val := p.parseRvalue() - if arrayType == nil { - arrayType = reflect.TypeOf(val) - } if reflect.TypeOf(val) != arrayType { - p.raiseError(follow, "mixed types in array") + arrayType = nil } array = append(array, val) follow = p.peek() @@ -408,6 +453,12 @@ func (p *tomlParser) parseArray() interface{} { p.getToken() } } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } // An array of Trees is actually an array of inline // tables, which is a shorthand for a table array. 
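The net effect of the tokenLocalDate plumbing (lexer regexp, parser branch above, and the new localtime.go types) is that date/time values without a UTC offset are no longer forced into time.Time. A sketch of the resulting Go types, outside the diff:

    package main

    import (
    	"fmt"

    	toml "github.com/pelletier/go-toml"
    )

    func main() {
    	tree, err := toml.Load(`
    odt = 1979-05-27T07:32:00Z # offset date-time
    ldt = 1979-05-27T07:32:00  # local date-time
    ld  = 1979-05-27           # local date
    lt  = 07:32:00             # local time
    `)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%T %T %T %T\n", tree.Get("odt"), tree.Get("ldt"), tree.Get("ld"), tree.Get("lt"))
    	// time.Time toml.LocalDateTime toml.LocalDate toml.LocalTime
    }
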
If the // array was not converted from []interface{} to []*Tree, diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go index 1a90813..6af4ec4 100644 --- a/vendor/github.com/pelletier/go-toml/token.go +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -1,10 +1,6 @@ package toml -import ( - "fmt" - "strconv" - "unicode" -) +import "fmt" // Define tokens type tokenType int @@ -35,6 +31,7 @@ const ( tokenDoubleLeftBracket tokenDoubleRightBracket tokenDate + tokenLocalDate tokenKeyGroup tokenKeyGroupArray tokenComma @@ -68,7 +65,8 @@ var tokenTypeNames = []string{ ")", "]]", "[[", - "Date", + "LocalDate", + "LocalDate", "KeyGroup", "KeyGroupArray", ",", @@ -95,14 +93,6 @@ func (tt tokenType) String() string { return "Unknown" } -func (t token) Int() int { - if result, err := strconv.Atoi(t.val); err != nil { - panic(err) - } else { - return result - } -} - func (t token) String() string { switch t.typ { case tokenEOF: @@ -119,7 +109,7 @@ func isSpace(r rune) bool { } func isAlphanumeric(r rune) bool { - return unicode.IsLetter(r) || r == '_' + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' } func isKeyChar(r rune) bool { @@ -134,7 +124,7 @@ func isKeyStartChar(r rune) bool { } func isDigit(r rune) bool { - return unicode.IsNumber(r) + return '0' <= r && r <= '9' } func isHexDigit(r rune) bool { diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go index 358a9be..cbb89a9 100644 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -23,6 +23,7 @@ type Tree struct { values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree comment string commented bool + inline bool position Position } @@ -121,6 +122,89 @@ func (t *Tree) GetPath(keys []string) interface{} { } } +// GetArray returns the value at key in the Tree. +// It returns []string, []int64, etc type if key has homogeneous lists +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArray(key string) interface{} { + if key == "" { + return t + } + return t.GetArrayPath(strings.Split(key, ".")) +} + +// GetArrayPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
+func (t *Tree) GetArrayPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + switch n := node.value.(type) { + case []interface{}: + return getArray(n) + default: + return node.value + } + default: + return node + } +} + +// if homogeneous array, then return slice type object over []interface{} +func getArray(n []interface{}) interface{} { + var s []string + var i64 []int64 + var f64 []float64 + var bl []bool + for _, value := range n { + switch v := value.(type) { + case string: + s = append(s, v) + case int64: + i64 = append(i64, v) + case float64: + f64 = append(f64, v) + case bool: + bl = append(bl, v) + default: + return n + } + } + if len(s) == len(n) { + return s + } else if len(i64) == len(n) { + return i64 + } else if len(f64) == len(n) { + return f64 + } else if len(bl) == len(n) { + return bl + } + return n +} + // GetPosition returns the position of the given key. func (t *Tree) GetPosition(key string) Position { if key == "" { @@ -129,6 +213,50 @@ func (t *Tree) GetPosition(key string) Position { return t.GetPositionPath(strings.Split(key, ".")) } +// SetPositionPath sets the position of element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree position is set. +func (t *Tree) SetPositionPath(keys []string, pos Position) { + if len(keys) == 0 { + t.position = pos + return + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + subtree = node[len(node)-1] + default: + return + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + node.position = pos + return + case *Tree: + node.position = pos + return + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + node[len(node)-1].position = pos + return + } +} + // GetPositionPath returns the element in the tree indicated by 'keys'. // If keys is of length zero, the current tree is returned. 
func (t *Tree) GetPositionPath(keys []string) Position { @@ -211,7 +339,8 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac // go to most recent element if len(node) == 0 { // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = node } subtree = node[len(node)-1] } @@ -222,11 +351,17 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac switch v := value.(type) { case *Tree: v.comment = opts.Comment + v.commented = opts.Commented toInsert = value case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } toInsert = value case *tomlValue: v.comment = opts.Comment + v.commented = opts.Commented + v.multiline = opts.Multiline toInsert = v default: toInsert = &tomlValue{value: value, @@ -307,6 +442,7 @@ func (t *Tree) createSubTree(keys []string, pos Position) error { if !exists { tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) tree.position = pos + tree.inline = subtree.inline subtree.values[intermediateKey] = tree nextTree = tree } diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go index 79610e9..8035350 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -57,6 +57,19 @@ func simpleValueCoercion(object interface{}) (interface{}, error) { return float64(original), nil case fmt.Stringer: return original.String(), nil + case []interface{}: + value := reflect.ValueOf(original) + length := value.Len() + arrayValue := reflect.MakeSlice(value.Type(), 0, length) + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return arrayValue.Interface(), nil default: return nil, fmt.Errorf("cannot convert type %T to Tree", object) } diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go index 198d5ac..ae6dac4 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "math" + "math/big" "reflect" "sort" "strconv" @@ -27,23 +28,35 @@ type sortNode struct { // Encodes a string to a TOML-compliant multi-line string value // This function is a clone of the existing encodeTomlString function, except that whitespace characters // are preserved. Quotation marks and backslashes are also not escaped. 
-func encodeMultilineTomlString(value string) string { +func encodeMultilineTomlString(value string, commented string) string { var b bytes.Buffer - - for _, rr := range value { + adjacentQuoteCount := 0 + + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } switch rr { case '\b': b.WriteString(`\b`) case '\t': b.WriteString("\t") case '\n': - b.WriteString("\n") + b.WriteString("\n" + commented) case '\f': b.WriteString(`\f`) case '\r': b.WriteString("\r") case '"': - b.WriteString(`"`) + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } case '\\': b.WriteString(`\`) default: @@ -90,7 +103,30 @@ func encodeTomlString(value string) string { return b.String() } -func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { +func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) { // this interface check is added to dereference the change made in the writeTo function. // That change was made to allow this function to see formatting options. tv, ok := v.(*tomlValue) @@ -106,20 +142,28 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen case int64: return strconv.FormatInt(value, 10), nil case float64: - // Ensure a round float does contain a decimal point. Otherwise feeding - // the output back to the parser would convert to an integer. 
+ // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil case string: if tv.multiline { - return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil } return "\"" + encodeTomlString(value) + "\"", nil case []byte: b, _ := v.([]byte) - return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) + return string(b), nil case bool: if value { return "true", nil @@ -127,6 +171,14 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen return "false", nil case time.Time: return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) case nil: return "", nil } @@ -137,7 +189,7 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen var values []string for i := 0; i < rv.Len(); i++ { item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) if err != nil { return "", err } @@ -151,16 +203,16 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen for _, value := range values { stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(value) + stringBuffer.WriteString(commented + value) stringBuffer.WriteString(`,`) stringBuffer.WriteString("\n") } - stringBuffer.WriteString(indent + "]") + stringBuffer.WriteString(indent + commented + "]") return stringBuffer.String(), nil } - return "[" + strings.Join(values, ",") + "]", nil + return "[" + strings.Join(values, ", ") + "]", nil } return "", fmt.Errorf("unsupported value type %T: %v", v, v) } @@ -255,10 +307,10 @@ func sortAlphabetical(t *Tree) (vals []sortNode) { } func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical) + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false) } -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder) (int64, error) { +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) { var orderedVals []sortNode switch ord { @@ -274,14 +326,10 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i k := node.key v := t.values[k] - combinedKey := k + combinedKey := quoteKeyIfNeeded(k) if keyspace != "" { combinedKey = keyspace + "." 
+ combinedKey } - var commented string - if t.commented { - commented = "# " - } switch node := v.(type) { // node has to be of those two types given how keys are sorted above @@ -302,24 +350,33 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i return bytesCount, errc } } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } - bytesCount, err = node.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented) if err != nil { return bytesCount, err } case []*Tree: for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } - bytesCount, err = subTree.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented) if err != nil { return bytesCount, err } @@ -332,7 +389,11 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) } - repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) + var commented string + if parentCommented || t.commented || v.commented { + commented = "# " + } + repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) if err != nil { return bytesCount, err } @@ -350,11 +411,8 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i } } - var commented string - if v.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err @@ -365,6 +423,32 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i return bytesCount, nil } +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) +// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. 
+ if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + func writeStrings(w io.Writer, s ...string) (int, error) { var n int for i := range s { @@ -387,12 +471,11 @@ func (t *Tree) WriteTo(w io.Writer) (int64, error) { // Output spans multiple lines, and is suitable for ingest by a TOML parser. // If the conversion cannot be performed, ToString returns a non-nil error. func (t *Tree) ToTomlString() (string, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) + b, err := t.Marshal() if err != nil { return "", err } - return buf.String(), nil + return string(b), nil } // String generates a human-readable representation of the current tree. diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md new file mode 100644 index 0000000..1ac6a81 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/LICENSE.md @@ -0,0 +1,7 @@ +Copyright (c) 2014-2015, Philip Hofer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md new file mode 100644 index 0000000..38349af --- /dev/null +++ b/vendor/github.com/philhofer/fwd/README.md @@ -0,0 +1,315 @@ + +# fwd + import "github.com/philhofer/fwd" + +The `fwd` package provides a buffered reader +and writer. Each has methods that help improve +the encoding/decoding performance of some binary +protocols. + +The `fwd.Writer` and `fwd.Reader` type provide similar +functionality to their counterparts in `bufio`, plus +a few extra utility methods that simplify read-ahead +and write-ahead. I wrote this package to improve serialization +performance for http://github.com/tinylib/msgp, +where it provided about a 2x speedup over `bufio` for certain +workloads. However, care must be taken to understand the semantics of the +extra methods provided by this package, as they allow +the user to access and manipulate the buffer memory +directly. + +The extra methods for `fwd.Reader` are `Peek`, `Skip` +and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +will re-allocate the read buffer in order to accommodate arbitrarily +large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +in the stream, and uses the `io.Seeker` interface if the underlying +stream implements it. 
`(*fwd.Reader).Next` returns a slice pointing +to the next `n` bytes in the read buffer (like `Peek`), but also +increments the read position. This allows users to process streams +in arbitrary block sizes without having to manage appropriately-sized +slices. Additionally, obviating the need to copy the data from the +buffer to another location in memory can improve performance dramatically +in CPU-bound applications. + +`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +returns a slice pointing to the next `n` bytes of the writer, and increments +the write position by the length of the returned slice. This allows users +to write directly to the end of the buffer. + + + + +## Constants +``` go +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 +) +``` +``` go +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 +) +``` + + + +## type Reader +``` go +type Reader struct { + // contains filtered or unexported fields +} +``` +Reader is a buffered look-ahead reader + + + + + + + + + +### func NewReader +``` go +func NewReader(r io.Reader) *Reader +``` +NewReader returns a new *Reader that reads from 'r' + + +### func NewReaderSize +``` go +func NewReaderSize(r io.Reader, n int) *Reader +``` +NewReaderSize returns a new *Reader that +reads from 'r' and has a buffer size 'n' + + + + +### func (\*Reader) BufferSize +``` go +func (r *Reader) BufferSize() int +``` +BufferSize returns the total size of the buffer + + + +### func (\*Reader) Buffered +``` go +func (r *Reader) Buffered() int +``` +Buffered returns the number of bytes currently in the buffer + + + +### func (\*Reader) Next +``` go +func (r *Reader) Next(n int) ([]byte, error) +``` +Next returns the next 'n' bytes in the stream. +Unlike Peek, Next advances the reader position. +The returned bytes point to the same +data as the buffer, so the slice is +only valid until the next reader method call. +An EOF is considered an unexpected error. +If an the returned slice is less than the +length asked for, an error will be returned, +and the reader position will not be incremented. + + + +### func (\*Reader) Peek +``` go +func (r *Reader) Peek(n int) ([]byte, error) +``` +Peek returns the next 'n' buffered bytes, +reading from the underlying reader if necessary. +It will only return a slice shorter than 'n' bytes +if it also returns an error. Peek does not advance +the reader. EOF errors are *not* returned as +io.ErrUnexpectedEOF. + + + +### func (\*Reader) Read +``` go +func (r *Reader) Read(b []byte) (int, error) +``` +Read implements `io.Reader` + + + +### func (\*Reader) ReadByte +``` go +func (r *Reader) ReadByte() (byte, error) +``` +ReadByte implements `io.ByteReader` + + + +### func (\*Reader) ReadFull +``` go +func (r *Reader) ReadFull(b []byte) (int, error) +``` +ReadFull attempts to read len(b) bytes into +'b'. It returns the number of bytes read into +'b', and an error if it does not return len(b). +EOF is considered an unexpected error. + + + +### func (\*Reader) Reset +``` go +func (r *Reader) Reset(rd io.Reader) +``` +Reset resets the underlying reader +and the read buffer. + + + +### func (\*Reader) Skip +``` go +func (r *Reader) Skip(n int) (int, error) +``` +Skip moves the reader forward 'n' bytes. +Returns the number of bytes skipped and any +errors encountered. It is analogous to Seek(n, 1). +If the underlying reader implements io.Seeker, then +that method will be used to skip forward. 
+ +If the reader encounters +an EOF before skipping 'n' bytes, it +returns io.ErrUnexpectedEOF. If the +underlying reader implements io.Seeker, then +those rules apply instead. (Many implementations +will not return `io.EOF` until the next call +to Read.) + + + +### func (\*Reader) WriteTo +``` go +func (r *Reader) WriteTo(w io.Writer) (int64, error) +``` +WriteTo implements `io.WriterTo` + + + +## type Writer +``` go +type Writer struct { + // contains filtered or unexported fields +} +``` +Writer is a buffered writer + + + + + + + + + +### func NewWriter +``` go +func NewWriter(w io.Writer) *Writer +``` +NewWriter returns a new writer +that writes to 'w' and has a buffer +that is `DefaultWriterSize` bytes. + + +### func NewWriterSize +``` go +func NewWriterSize(w io.Writer, size int) *Writer +``` +NewWriterSize returns a new writer +that writes to 'w' and has a buffer +that is 'size' bytes. + + + + +### func (\*Writer) BufferSize +``` go +func (w *Writer) BufferSize() int +``` +BufferSize returns the maximum size of the buffer. + + + +### func (\*Writer) Buffered +``` go +func (w *Writer) Buffered() int +``` +Buffered returns the number of buffered bytes +in the reader. + + + +### func (\*Writer) Flush +``` go +func (w *Writer) Flush() error +``` +Flush flushes any buffered bytes +to the underlying writer. + + + +### func (\*Writer) Next +``` go +func (w *Writer) Next(n int) ([]byte, error) +``` +Next returns the next 'n' free bytes +in the write buffer, flushing the writer +as necessary. Next will return `io.ErrShortBuffer` +if 'n' is greater than the size of the write buffer. +Calls to 'next' increment the write position by +the size of the returned buffer. + + + +### func (\*Writer) ReadFrom +``` go +func (w *Writer) ReadFrom(r io.Reader) (int64, error) +``` +ReadFrom implements `io.ReaderFrom` + + + +### func (\*Writer) Write +``` go +func (w *Writer) Write(p []byte) (int, error) +``` +Write implements `io.Writer` + + + +### func (\*Writer) WriteByte +``` go +func (w *Writer) WriteByte(b byte) error +``` +WriteByte implements `io.ByteWriter` + + + +### func (\*Writer) WriteString +``` go +func (w *Writer) WriteString(s string) (int, error) +``` +WriteString is analogous to Write, but it takes a string. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go new file mode 100644 index 0000000..72cc112 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/reader.go @@ -0,0 +1,383 @@ +// The `fwd` package provides a buffered reader +// and writer. Each has methods that help improve +// the encoding/decoding performance of some binary +// protocols. +// +// The `fwd.Writer` and `fwd.Reader` type provide similar +// functionality to their counterparts in `bufio`, plus +// a few extra utility methods that simplify read-ahead +// and write-ahead. I wrote this package to improve serialization +// performance for http://github.com/tinylib/msgp, +// where it provided about a 2x speedup over `bufio` for certain +// workloads. However, care must be taken to understand the semantics of the +// extra methods provided by this package, as they allow +// the user to access and manipulate the buffer memory +// directly. +// +// The extra methods for `fwd.Reader` are `Peek`, `Skip` +// and `Next`. 
`(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +// will re-allocate the read buffer in order to accommodate arbitrarily +// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +// in the stream, and uses the `io.Seeker` interface if the underlying +// stream implements it. `(*fwd.Reader).Next` returns a slice pointing +// to the next `n` bytes in the read buffer (like `Peek`), but also +// increments the read position. This allows users to process streams +// in arbitrary block sizes without having to manage appropriately-sized +// slices. Additionally, obviating the need to copy the data from the +// buffer to another location in memory can improve performance dramatically +// in CPU-bound applications. +// +// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +// returns a slice pointing to the next `n` bytes of the writer, and increments +// the write position by the length of the returned slice. This allows users +// to write directly to the end of the buffer. +// +package fwd + +import ( + "io" + "os" +) + +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 + + // minimum read buffer; straight from bufio + minReaderSize = 16 +) + +// NewReader returns a new *Reader that reads from 'r' +func NewReader(r io.Reader) *Reader { + return NewReaderSize(r, DefaultReaderSize) +} + +// NewReaderSize returns a new *Reader that +// reads from 'r' and has a buffer size 'n'. +func NewReaderSize(r io.Reader, n int) *Reader { + buf := make([]byte, 0, max(n, minReaderSize)) + return NewReaderBuf(r, buf) +} + +// NewReaderBuf returns a new *Reader that +// reads from 'r' and uses 'buf' as a buffer. +// 'buf' is not used when has smaller capacity than 16, +// custom buffer is allocated instead. +func NewReaderBuf(r io.Reader, buf []byte) *Reader { + if cap(buf) < minReaderSize { + buf = make([]byte, 0, minReaderSize) + } + buf = buf[:0] + rd := &Reader{ + r: r, + data: buf, + } + if s, ok := r.(io.Seeker); ok { + rd.rs = s + } + return rd +} + +// Reader is a buffered look-ahead reader +type Reader struct { + r io.Reader // underlying reader + + // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space + data []byte // data + n int // read offset + state error // last read error + + // if the reader past to NewReader was + // also an io.Seeker, this is non-nil + rs io.Seeker +} + +// Reset resets the underlying reader +// and the read buffer. +func (r *Reader) Reset(rd io.Reader) { + r.r = rd + r.data = r.data[0:0] + r.n = 0 + r.state = nil + if s, ok := rd.(io.Seeker); ok { + r.rs = s + } else { + r.rs = nil + } +} + +// more() does one read on the underlying reader +func (r *Reader) more() { + // move data backwards so that + // the read offset is 0; this way + // we can supply the maximum number of + // bytes to the reader + if r.n != 0 { + if r.n < len(r.data) { + r.data = r.data[:copy(r.data[0:], r.data[r.n:])] + } else { + r.data = r.data[:0] + } + r.n = 0 + } + var a int + a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) + if a == 0 && r.state == nil { + r.state = io.ErrNoProgress + return + } else if a > 0 && r.state == io.EOF { + // discard the io.EOF if we read more than 0 bytes. + // the next call to Read should return io.EOF again. 
+ r.state = nil + } + r.data = r.data[:len(r.data)+a] +} + +// pop error +func (r *Reader) err() (e error) { + e, r.state = r.state, nil + return +} + +// pop error; EOF -> io.ErrUnexpectedEOF +func (r *Reader) noEOF() (e error) { + e, r.state = r.state, nil + if e == io.EOF { + e = io.ErrUnexpectedEOF + } + return +} + +// buffered bytes +func (r *Reader) buffered() int { return len(r.data) - r.n } + +// Buffered returns the number of bytes currently in the buffer +func (r *Reader) Buffered() int { return len(r.data) - r.n } + +// BufferSize returns the total size of the buffer +func (r *Reader) BufferSize() int { return cap(r.data) } + +// Peek returns the next 'n' buffered bytes, +// reading from the underlying reader if necessary. +// It will only return a slice shorter than 'n' bytes +// if it also returns an error. Peek does not advance +// the reader. EOF errors are *not* returned as +// io.ErrUnexpectedEOF. +func (r *Reader) Peek(n int) ([]byte, error) { + // in the degenerate case, + // we may need to realloc + // (the caller asked for more + // bytes than the size of the buffer) + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return r.data[r.n:], r.err() + } + + return r.data[r.n : r.n+n], nil +} + +// discard(n) discards up to 'n' buffered bytes, and +// and returns the number of bytes discarded +func (r *Reader) discard(n int) int { + inbuf := r.buffered() + if inbuf <= n { + r.n = 0 + r.data = r.data[:0] + return inbuf + } + r.n += n + return n +} + +// Skip moves the reader forward 'n' bytes. +// Returns the number of bytes skipped and any +// errors encountered. It is analogous to Seek(n, 1). +// If the underlying reader implements io.Seeker, then +// that method will be used to skip forward. +// +// If the reader encounters +// an EOF before skipping 'n' bytes, it +// returns io.ErrUnexpectedEOF. If the +// underlying reader implements io.Seeker, then +// those rules apply instead. (Many implementations +// will not return `io.EOF` until the next call +// to Read.) +func (r *Reader) Skip(n int) (int, error) { + if n < 0 { + return 0, os.ErrInvalid + } + + // discard some or all of the current buffer + skipped := r.discard(n) + + // if we can Seek() through the remaining bytes, do that + if n > skipped && r.rs != nil { + nn, err := r.rs.Seek(int64(n-skipped), 1) + return int(nn) + skipped, err + } + // otherwise, keep filling the buffer + // and discarding it up to 'n' + for skipped < n && r.state == nil { + r.more() + skipped += r.discard(n - skipped) + } + return skipped, r.noEOF() +} + +// Next returns the next 'n' bytes in the stream. +// Unlike Peek, Next advances the reader position. +// The returned bytes point to the same +// data as the buffer, so the slice is +// only valid until the next reader method call. +// An EOF is considered an unexpected error. +// If an the returned slice is less than the +// length asked for, an error will be returned, +// and the reader position will not be incremented. 
+func (r *Reader) Next(n int) ([]byte, error) { + + // in case the buffer is too small + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // fill at least 'n' bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + if r.buffered() < n { + return r.data[r.n:], r.noEOF() + } + out := r.data[r.n : r.n+n] + r.n += n + return out, nil +} + +// Read implements `io.Reader` +func (r *Reader) Read(b []byte) (int, error) { + // if we have data in the buffer, just + // return that. + if r.buffered() != 0 { + x := copy(b, r.data[r.n:]) + r.n += x + return x, nil + } + var n int + // we have no buffered data; determine + // whether or not to buffer or call + // the underlying reader directly + if len(b) >= cap(r.data) { + n, r.state = r.r.Read(b) + } else { + r.more() + n = copy(b, r.data) + r.n = n + } + if n == 0 { + return 0, r.err() + } + return n, nil +} + +// ReadFull attempts to read len(b) bytes into +// 'b'. It returns the number of bytes read into +// 'b', and an error if it does not return len(b). +// EOF is considered an unexpected error. +func (r *Reader) ReadFull(b []byte) (int, error) { + var n int // read into b + var nn int // scratch + l := len(b) + // either read buffered data, + // or read directly for the underlying + // buffer, or fetch more buffered data. + for n < l && r.state == nil { + if r.buffered() != 0 { + nn = copy(b[n:], r.data[r.n:]) + n += nn + r.n += nn + } else if l-n > cap(r.data) { + nn, r.state = r.r.Read(b[n:]) + n += nn + } else { + r.more() + } + } + if n < l { + return n, r.noEOF() + } + return n, nil +} + +// ReadByte implements `io.ByteReader` +func (r *Reader) ReadByte() (byte, error) { + for r.buffered() < 1 && r.state == nil { + r.more() + } + if r.buffered() < 1 { + return 0, r.err() + } + b := r.data[r.n] + r.n++ + return b, nil +} + +// WriteTo implements `io.WriterTo` +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + var ( + i int64 + ii int + err error + ) + // first, clear buffer + if r.buffered() > 0 { + ii, err = w.Write(r.data[r.n:]) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + for r.state == nil { + // here we just do + // 1:1 reads and writes + r.more() + if r.buffered() > 0 { + ii, err = w.Write(r.data) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + } + if r.state != io.EOF { + return i, r.err() + } + return i, nil +} + +func max(a int, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go new file mode 100644 index 0000000..4d6ea15 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer.go @@ -0,0 +1,236 @@ +package fwd + +import "io" + +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 + + minWriterSize = minReaderSize +) + +// Writer is a buffered writer +type Writer struct { + w io.Writer // writer + buf []byte // 0:len(buf) is bufered data +} + +// NewWriter returns a new writer +// that writes to 'w' and has a buffer +// that is `DefaultWriterSize` bytes. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, DefaultWriterSize), + } +} + +// NewWriterSize returns a new writer that +// writes to 'w' and has a buffer size 'n'. 
+func NewWriterSize(w io.Writer, n int) *Writer { + if wr, ok := w.(*Writer); ok && cap(wr.buf) >= n { + return wr + } + buf := make([]byte, 0, max(n, minWriterSize)) + return NewWriterBuf(w, buf) +} + +// NewWriterBuf returns a new writer +// that writes to 'w' and has 'buf' as a buffer. +// 'buf' is not used when has smaller capacity than 18, +// custom buffer is allocated instead. +func NewWriterBuf(w io.Writer, buf []byte) *Writer { + if cap(buf) < minWriterSize { + buf = make([]byte, 0, minWriterSize) + } + buf = buf[:0] + return &Writer{ + w: w, + buf: buf, + } +} + +// Buffered returns the number of buffered bytes +// in the reader. +func (w *Writer) Buffered() int { return len(w.buf) } + +// BufferSize returns the maximum size of the buffer. +func (w *Writer) BufferSize() int { return cap(w.buf) } + +// Flush flushes any buffered bytes +// to the underlying writer. +func (w *Writer) Flush() error { + l := len(w.buf) + if l > 0 { + n, err := w.w.Write(w.buf) + + // if we didn't write the whole + // thing, copy the unwritten + // bytes to the beginnning of the + // buffer. + if n < l && n > 0 { + w.pushback(n) + if err == nil { + err = io.ErrShortWrite + } + } + if err != nil { + return err + } + w.buf = w.buf[:0] + return nil + } + return nil +} + +// Write implements `io.Writer` +func (w *Writer) Write(p []byte) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(p) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + if c < ln { + return w.w.Write(p) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], p), nil +} + +// WriteString is analogous to Write, but it takes a string. +func (w *Writer) WriteString(s string) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(s) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + // + // yes, this is unsafe. *but* + // io.Writer is not allowed + // to mutate its input or + // maintain a reference to it, + // per the spec in package io. + // + // plus, if the string is really + // too big to fit in the buffer, then + // creating a copy to write it is + // expensive (and, strictly speaking, + // unnecessary) + if c < ln { + return w.w.Write(unsafestr(s)) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], s), nil +} + +// WriteByte implements `io.ByteWriter` +func (w *Writer) WriteByte(b byte) error { + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + w.buf = append(w.buf, b) + return nil +} + +// Next returns the next 'n' free bytes +// in the write buffer, flushing the writer +// as necessary. Next will return `io.ErrShortBuffer` +// if 'n' is greater than the size of the write buffer. +// Calls to 'next' increment the write position by +// the size of the returned buffer. +func (w *Writer) Next(n int) ([]byte, error) { + c, l := cap(w.buf), len(w.buf) + if n > c { + return nil, io.ErrShortBuffer + } + avail := c - l + if avail < n { + if err := w.Flush(); err != nil { + return nil, err + } + l = len(w.buf) + } + w.buf = w.buf[:l+n] + return w.buf[l:], nil +} + +// take the bytes from w.buf[n:len(w.buf)] +// and put them at the beginning of w.buf, +// and resize to the length of the copied segment. 
+func (w *Writer) pushback(n int) { + w.buf = w.buf[:copy(w.buf, w.buf[n:])] +} + +// ReadFrom implements `io.ReaderFrom` +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // anticipatory flush + if err := w.Flush(); err != nil { + return 0, err + } + + w.buf = w.buf[0:cap(w.buf)] // expand buffer + + var nn int64 // written + var err error // error + var x int // read + + // 1:1 reads and writes + for err == nil { + x, err = r.Read(w.buf) + if x > 0 { + n, werr := w.w.Write(w.buf[:x]) + nn += int64(n) + + if err != nil { + if n < x && n > 0 { + w.pushback(n - x) + } + return nn, werr + } + if n < x { + w.pushback(n - x) + return nn, io.ErrShortWrite + } + } else if err == nil { + err = io.ErrNoProgress + break + } + } + if err != io.EOF { + return nn, err + } + + // we only clear here + // because we are sure + // the writes have + // succeeded. otherwise, + // we retain the data in case + // future writes succeed. + w.buf = w.buf[0:0] + + return nn, nil +} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go new file mode 100644 index 0000000..e367f39 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_appengine.go @@ -0,0 +1,5 @@ +// +build appengine + +package fwd + +func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go new file mode 100644 index 0000000..a0bf453 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go @@ -0,0 +1,18 @@ +// +build !appengine + +package fwd + +import ( + "reflect" + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(b string) []byte { + l := len(b) + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: l, + Cap: l, + Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, + })) +} diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index d4b9266..9159de0 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -1,15 +1,10 @@ language: go go_import_path: github.com/pkg/errors go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip script: - - go test -v ./... + - make check diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 0000000..ce9d7cd --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... 
+ +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 6483ba2..54dfdcb 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -41,11 +41,18 @@ default: [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. ## License diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 7421f32..161aea2 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -82,7 +82,7 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // @@ -159,6 +159,9 @@ type withStack struct { func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -241,6 +244,9 @@ type withMessage struct { func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 0000000..be0d10d --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. 
+// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 2874a04..779a834 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -5,10 +5,13 @@ import ( "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ func (f Frame) line() int { return line } +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. // // %s source file @@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) { case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) { } } +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame @@ -94,16 +110,30 @@ func (st StackTrace) Format(s fmt.State, verb rune) { switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. 
+func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) } + io.WriteString(s, "]") } // stack represents a stack of program counters. diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml index 010d4cc..559fa39 100644 --- a/vendor/github.com/stretchr/objx/.codeclimate.yml +++ b/vendor/github.com/stretchr/objx/.codeclimate.yml @@ -10,4 +10,12 @@ exclude_patterns: - ".github/" - "vendor/" - "codegen/" +- "*.yml" +- ".*.yml" +- "*.md" +- "Gopkg.*" - "doc.go" +- "type_specific_codegen_test.go" +- "type_specific_codegen.go" +- ".gitignore" +- "LICENSE" diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml index a63efa5..cde6eb2 100644 --- a/vendor/github.com/stretchr/objx/.travis.yml +++ b/vendor/github.com/stretchr/objx/.travis.yml @@ -1,8 +1,14 @@ language: go go: - - 1.8 - - 1.9 - - tip + - "1.10.x" + - "1.11.x" + - "1.12.x" + - master + +matrix: + allow_failures: + - go: master +fast_finish: true env: global: @@ -14,12 +20,11 @@ before_script: - ./cc-test-reporter before-build install: -- go get github.com/go-task/task/cmd/task + - curl -sL https://taskfile.dev/install.sh | sh script: -- task dl-deps -- task lint -- task test-coverage + - diff -u <(echo -n) <(./bin/task lint) + - ./bin/task test-coverage after_script: - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock deleted file mode 100644 index eebe342..0000000 --- a/vendor/github.com/stretchr/objx/Gopkg.lock +++ /dev/null @@ -1,30 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" - version = "v1.2.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml deleted file mode 100644 index d70f157..0000000 --- a/vendor/github.com/stretchr/objx/Gopkg.toml +++ /dev/null @@ -1,8 +0,0 @@ -[prune] - unused-packages = true - non-go = true - go-tests = true - -[[constraint]] - name = "github.com/stretchr/testify" - version = "~1.2.0" diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md index be5750c..246660b 100644 --- a/vendor/github.com/stretchr/objx/README.md +++ b/vendor/github.com/stretchr/objx/README.md @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We support the lastest two major Go versions, which are 1.8 and 1.9 at the moment. +We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! 
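The pkg/errors hunks a few files above add `Unwrap` methods to `withStack` and `withMessage` and re-export the standard-library `Is`, `As` and `Unwrap` helpers in the new `go113.go`. A minimal sketch of what that interop looks like from calling code, assuming pkg/errors at or after this revision; the sentinel error and message below are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found")

func main() {
	// Wrap attaches a message and a stack trace; with this patch both
	// wrapper types also expose Unwrap, so the chain is visible to the
	// standard-library helpers re-exported in go113.go.
	err := errors.Wrap(errNotFound, "loading plugin config")

	fmt.Println(errors.Cause(err) == errNotFound) // pre-Go 1.13 style: true
	fmt.Println(errors.Is(err, errNotFound))      // Go 1.13 style: true

	// Unwrap peels one wrapper layer at a time: the stack wrapper first,
	// then the message wrapper, leaving the original sentinel.
	fmt.Println(errors.Unwrap(errors.Unwrap(err)) == errNotFound) // true
}
```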
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index f803564..a749ac5 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,32 +1,30 @@ -default: - deps: [test] +version: '2' -dl-deps: - desc: Downloads cli dependencies - cmds: - - go get -u github.com/golang/lint/golint - - go get -u github.com/golang/dep/cmd/dep +env: + GOFLAGS: -mod=vendor -update-deps: - desc: Updates dependencies - cmds: - - dep ensure - - dep ensure -update +tasks: + default: + deps: [test] -lint: - desc: Runs golint - cmds: - - go fmt $(go list ./... | grep -v /vendor/) - - go vet $(go list ./... | grep -v /vendor/) - - golint $(ls *.go | grep -v "doc.go") - silent: true + lint: + desc: Checks code style + cmds: + - gofmt -d -s *.go + - go vet ./... + silent: true -test: - desc: Runs go tests - cmds: - - go test -race . + lint-fix: + desc: Fixes code style + cmds: + - gofmt -w -s *.go -test-coverage: - desc: Runs go tests and calucates test coverage - cmds: - - go test -coverprofile=c.out . + test: + desc: Runs go tests + cmds: + - go test -race ./... + + test-coverage: + desc: Runs go tests and calucates test coverage + cmds: + - go test -race -coverprofile=c.out ./... diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 204356a..80ad167 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -1,18 +1,34 @@ package objx import ( + "reflect" "regexp" "strconv" "strings" ) -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // arrayAccesRegexString is the regex used to extract the array number + // from the access path + arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + + // mapAccessRegexString is the regex used to extract the map key + // from the access path + mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` +) // arrayAccesRegex is the compiled arrayAccesRegexString var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// mapAccessRegex is the compiled mapAccessRegexString +var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) + // Get gets the value using the specified selector and // returns it inside a new Obj object. // @@ -46,103 +62,118 @@ func (m Map) Set(selector string, value interface{}) Map { return m } -// access accesses the object using the selector and performs the -// appropriate action. -func access(current, selector, value interface{}, isSet bool) interface{} { - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - if index >= len(array) { - return nil +// getIndex returns the index, which is hold in s by two braches. +// It also returns s withour the index part, e.g. name[1] will return (1, name). 
+// If no index is found, -1 is returned +func getIndex(s string) (int, string) { + arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + if len(arrayMatches) > 0 { + // Get the key into the map + selector := arrayMatches[1] + // Get the index into the array at the key + // We know this cannt fail because arrayMatches[2] is an int for sure + index, _ := strconv.Atoi(arrayMatches[2]) + return index, selector + } + return -1, s +} + +// getKey returns the key which is held in s by two brackets. +// It also returns the next selector. +func getKey(s string) (string, string) { + selSegs := strings.SplitN(s, PathSeparator, 2) + thisSel := selSegs[0] + nextSel := "" + + if len(selSegs) > 1 { + nextSel = selSegs[1] + } + + mapMatches := mapAccessRegex.FindStringSubmatch(s) + if len(mapMatches) > 0 { + if _, err := strconv.Atoi(mapMatches[2]); err != nil { + thisSel = mapMatches[1] + nextSel = "[" + mapMatches[2] + "]" + mapMatches[3] + + if thisSel == "" { + thisSel = mapMatches[2] + nextSel = mapMatches[3] } - return array[index] - } - return nil - - case string: - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - if strings.Contains(thisSel, "[") { - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - if len(arrayMatches) > 0 { - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. Must use array[int].") - } + + if nextSel == "" { + selSegs = []string{"", ""} + } else if nextSel[0] == '.' { + nextSel = nextSel[1:] } } - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) + } + + return thisSel, nextSel +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { + thisSel, nextSel := getKey(selector) + + index := -1 + if strings.Contains(thisSel, "[") { + index, thisSel = getIndex(thisSel) + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if nextSel == "" && isSet { + curMSI[thisSel] = value + return nil } - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } - current = curMSI[thisSel] - default: - current = nil + + _, ok := curMSI[thisSel].(map[string]interface{}) + if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet { + curMSI[thisSel] = map[string]interface{}{} } - // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - current = nil - } + + current = curMSI[thisSel] + default: + current = nil + } + + // do we need to access the item of an array? 
+ if index > -1 { + if array, ok := interSlice(current); ok { + if index < len(array) { + current = array[index] + } else { + current = nil } } - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet) - } + } + if nextSel != "" { + current = access(current, nextSel, value, isSet) } return current } -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - return 0 +func interSlice(slice interface{}) ([]interface{}, bool) { + if array, ok := slice.([]interface{}); ok { + return array, ok + } + + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + return nil, false } - return value + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, true } diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a..0000000 --- a/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. - SignatureSeparator = "_" -) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go index 5e020f3..080aa46 100644 --- a/vendor/github.com/stretchr/objx/conversions.go +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -7,11 +7,51 @@ import ( "errors" "fmt" "net/url" + "strconv" ) +// SignatureSeparator is the character that is used to +// separate the Base64 string from the security signature. +const SignatureSeparator = "_" + +// URLValuesSliceKeySuffix is the character that is used to +// specify a suffic for slices parsed by URLValues. +// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +var urlValuesSliceKeySuffix = "[]" + +const ( + URLValuesSliceKeySuffixEmpty = "" + URLValuesSliceKeySuffixArray = "[]" + URLValuesSliceKeySuffixIndex = "[i]" +) + +// SetURLValuesSliceKeySuffix sets the character that is used to +// specify a suffic for slices parsed by URLValues. 
+// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +func SetURLValuesSliceKeySuffix(s string) error { + if s == URLValuesSliceKeySuffixEmpty || s == URLValuesSliceKeySuffixArray || s == URLValuesSliceKeySuffixIndex { + urlValuesSliceKeySuffix = s + return nil + } + + return errors.New("objx: Invalid URLValuesSliceKeySuffix provided.") +} + // JSON converts the contained object to a JSON string // representation func (m Map) JSON() (string, error) { + for k, v := range m { + m[k] = cleanUp(v) + } + result, err := json.Marshal(m) if err != nil { err = errors.New("objx: JSON encode failed with: " + err.Error()) @@ -19,6 +59,63 @@ func (m Map) JSON() (string, error) { return string(result), err } +func cleanUpInterfaceArray(in []interface{}) []interface{} { + result := make([]interface{}, len(in)) + for i, v := range in { + result[i] = cleanUp(v) + } + return result +} + +func cleanUpInterfaceMap(in map[interface{}]interface{}) Map { + result := Map{} + for k, v := range in { + result[fmt.Sprintf("%v", k)] = cleanUp(v) + } + return result +} + +func cleanUpStringMap(in map[string]interface{}) Map { + result := Map{} + for k, v := range in { + result[k] = cleanUp(v) + } + return result +} + +func cleanUpMSIArray(in []map[string]interface{}) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUpMapArray(in []Map) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUp(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + return cleanUpInterfaceArray(v) + case []map[string]interface{}: + return cleanUpMSIArray(v) + case map[interface{}]interface{}: + return cleanUpInterfaceMap(v) + case Map: + return cleanUpStringMap(v) + case []Map: + return cleanUpMapArray(v) + default: + return v + } +} + // MustJSON converts the contained object to a JSON string // representation and panics if there is an error func (m Map) MustJSON() string { @@ -40,10 +137,7 @@ func (m Map) Base64() (string, error) { } encoder := base64.NewEncoder(base64.StdEncoding, &buf) - _, err = encoder.Write([]byte(jsonData)) - if err != nil { - return "", err - } + _, _ = encoder.Write([]byte(jsonData)) _ = encoder.Close() return buf.String(), nil @@ -93,13 +187,91 @@ func (m Map) MustSignedBase64(key string) string { // function requires that the wrapped object be a map[string]interface{} func (m Map) URLValues() url.Values { vals := make(url.Values) - for k, v := range m { - //TODO: can this be done without sprintf? 
- vals.Set(k, fmt.Sprintf("%v", v)) - } + + m.parseURLValues(m, vals, "") + return vals } +func (m Map) parseURLValues(queryMap Map, vals url.Values, key string) { + useSliceIndex := false + if urlValuesSliceKeySuffix == "[i]" { + useSliceIndex = true + } + + for k, v := range queryMap { + val := &Value{data: v} + switch { + case val.IsObjxMap(): + if key == "" { + m.parseURLValues(val.ObjxMap(), vals, k) + } else { + m.parseURLValues(val.ObjxMap(), vals, key+"["+k+"]") + } + case val.IsObjxMapSlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustObjxMapSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(sv, vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustObjxMapSlice() { + m.parseURLValues(sv, vals, sliceKey) + } + } + case val.IsMSISlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustMSISlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(New(sv), vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustMSISlice() { + m.parseURLValues(New(sv), vals, sliceKey) + } + } + case val.IsStrSlice(), val.IsBoolSlice(), + val.IsFloat32Slice(), val.IsFloat64Slice(), + val.IsIntSlice(), val.IsInt8Slice(), val.IsInt16Slice(), val.IsInt32Slice(), val.IsInt64Slice(), + val.IsUintSlice(), val.IsUint8Slice(), val.IsUint16Slice(), val.IsUint32Slice(), val.IsUint64Slice(): + + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.StringSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + vals.Set(sk, sv) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + vals[sliceKey] = val.StringSlice() + } + + default: + if key == "" { + vals.Set(k, val.String()) + } else { + vals.Set(key+"["+k+"]", val.String()) + } + } + } +} + // URLQuery gets an encoded URL query representing the given // Obj. 
This function requires that the wrapped object be a // map[string]interface{} diff --git a/vendor/github.com/stretchr/objx/go.mod b/vendor/github.com/stretchr/objx/go.mod new file mode 100644 index 0000000..31ec5a7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/go.mod @@ -0,0 +1,8 @@ +module github.com/stretchr/objx + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/stretchr/objx/go.sum b/vendor/github.com/stretchr/objx/go.sum new file mode 100644 index 0000000..4f89841 --- /dev/null +++ b/vendor/github.com/stretchr/objx/go.sum @@ -0,0 +1,8 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index 406bc89..95149c0 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -97,12 +97,50 @@ func MustFromJSON(jsonString string) Map { // // Returns an error if the JSON is invalid. func FromJSON(jsonString string) (Map, error) { - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) + var m Map + err := json.Unmarshal([]byte(jsonString), &m) if err != nil { return Nil, err } - return New(data), nil + m.tryConvertFloat64() + return m, nil +} + +func (m Map) tryConvertFloat64() { + for k, v := range m { + switch v.(type) { + case float64: + f := v.(float64) + if float64(int(f)) == f { + m[k] = int(f) + } + case map[string]interface{}: + t := New(v) + t.tryConvertFloat64() + m[k] = t + case []interface{}: + m[k] = tryConvertFloat64InSlice(v.([]interface{})) + } + } +} + +func tryConvertFloat64InSlice(s []interface{}) []interface{} { + for k, v := range s { + switch v.(type) { + case float64: + f := v.(float64) + if float64(int(f)) == f { + s[k] = int(f) + } + case map[string]interface{}: + t := New(v) + t.tryConvertFloat64() + s[k] = t + case []interface{}: + s[k] = tryConvertFloat64InSlice(v.([]interface{})) + } + } + return s } // FromBase64 creates a new Obj containing the data specified diff --git a/vendor/github.com/stretchr/objx/type_specific.go b/vendor/github.com/stretchr/objx/type_specific.go new file mode 100644 index 0000000..80f88d9 --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific.go @@ -0,0 +1,346 @@ +package objx + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + + s := v.ObjxMapSlice() + if s == nil { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]map[string]interface{}, len(s)) + for i := range s { + result[i] = s[i].Value().MSI() + } + return result +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + if s := v.MSISlice(); s != nil { + return s + } + + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + if !ok { + _, ok = v.data.(Map) + } + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + if !ok { + _, ok = v.data.([]Map) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. 
+func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([]Map); ok { + return s + } + + if s, ok := v.data.([]map[string]interface{}); ok { + result := make([]Map, len(s)) + for i := range s { + result[i] = s[i] + } + return result + } + + s, ok := v.data.([]interface{}) + if !ok { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]Map, len(s)) + for i := range s { + switch s[i].(type) { + case Map: + result[i] = s[i].(Map) + case map[string]interface{}: + result[i] = New(s[i]) + default: + return nil + } + } + return result +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + if s := v.ObjxMapSlice(); s != nil { + return s + } + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + if !ok { + _, ok = v.data.(map[string]interface{}) + } + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. 
+func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + if !ok { + _, ok = v.data.([]map[string]interface{}) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go index 202a91f..9859b40 100644 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -1,7 +1,7 @@ package objx /* - Inter (interface{} and []interface{}) + Inter (interface{} and []interface{}) */ // Inter gets the value as a interface{}, returns the optionalDefault @@ -126,257 +126,7 @@ func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Valu } /* - MSI (map[string]interface{} and []map[string]interface{}) -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. -// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. -func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. -func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - var selected []map[string]interface{} - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - groups := make(map[string][]map[string]interface{}) - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. 
-func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - ObjxMap ((Map) and [](Map)) -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). -// -// Panics if the object is not a (Map). -func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). -func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. -func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. -func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - var selected [](Map) - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). 
-func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - groups := make(map[string][](Map)) - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. -func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Bool (bool and []bool) + Bool (bool and []bool) */ // Bool gets the value as a bool, returns the optionalDefault @@ -501,7 +251,7 @@ func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { } /* - Str (string and []string) + Str (string and []string) */ // Str gets the value as a string, returns the optionalDefault @@ -626,7 +376,7 @@ func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { } /* - Int (int and []int) + Int (int and []int) */ // Int gets the value as a int, returns the optionalDefault @@ -751,7 +501,7 @@ func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { } /* - Int8 (int8 and []int8) + Int8 (int8 and []int8) */ // Int8 gets the value as a int8, returns the optionalDefault @@ -876,7 +626,7 @@ func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { } /* - Int16 (int16 and []int16) + Int16 (int16 and []int16) */ // Int16 gets the value as a int16, returns the optionalDefault @@ -1001,7 +751,7 @@ func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { } /* - Int32 (int32 and []int32) + Int32 (int32 and []int32) */ // Int32 gets the value as a int32, returns the optionalDefault @@ -1126,7 +876,7 @@ func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { } /* - Int64 (int64 and []int64) + Int64 (int64 and []int64) */ // Int64 gets the value as a int64, returns the optionalDefault @@ -1251,7 +1001,7 @@ func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { } /* - Uint (uint and []uint) + Uint (uint and []uint) */ // Uint gets the value as a uint, returns the optionalDefault @@ -1376,7 +1126,7 @@ func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { } /* - Uint8 (uint8 and []uint8) + Uint8 (uint8 and []uint8) */ // Uint8 gets the value as a uint8, returns the optionalDefault @@ -1501,7 +1251,7 @@ func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { } /* - Uint16 (uint16 and []uint16) + Uint16 (uint16 and []uint16) */ // Uint16 gets the value as a uint16, returns the optionalDefault @@ -1626,7 +1376,7 @@ func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { } /* - Uint32 (uint32 and []uint32) 
+ Uint32 (uint32 and []uint32) */ // Uint32 gets the value as a uint32, returns the optionalDefault @@ -1751,7 +1501,7 @@ func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { } /* - Uint64 (uint64 and []uint64) + Uint64 (uint64 and []uint64) */ // Uint64 gets the value as a uint64, returns the optionalDefault @@ -1876,7 +1626,7 @@ func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { } /* - Uintptr (uintptr and []uintptr) + Uintptr (uintptr and []uintptr) */ // Uintptr gets the value as a uintptr, returns the optionalDefault @@ -2001,7 +1751,7 @@ func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value } /* - Float32 (float32 and []float32) + Float32 (float32 and []float32) */ // Float32 gets the value as a float32, returns the optionalDefault @@ -2126,7 +1876,7 @@ func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value } /* - Float64 (float64 and []float64) + Float64 (float64 and []float64) */ // Float64 gets the value as a float64, returns the optionalDefault @@ -2251,7 +2001,7 @@ func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value } /* - Complex64 (complex64 and []complex64) + Complex64 (complex64 and []complex64) */ // Complex64 gets the value as a complex64, returns the optionalDefault @@ -2376,7 +2126,7 @@ func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Va } /* - Complex128 (complex128 and []complex128) + Complex128 (complex128 and []complex128) */ // Complex128 gets the value as a complex128, returns the optionalDefault diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go index e4b4a14..4e5f9b7 100644 --- a/vendor/github.com/stretchr/objx/value.go +++ b/vendor/github.com/stretchr/objx/value.go @@ -20,6 +20,8 @@ func (v *Value) Data() interface{} { // String returns the value always as a string func (v *Value) String() string { switch { + case v.IsNil(): + return "" case v.IsStr(): return v.Str() case v.IsBool(): @@ -51,3 +53,107 @@ func (v *Value) String() string { } return fmt.Sprintf("%#v", v.Data()) } + +// StringSlice returns the value always as a []string +func (v *Value) StringSlice(optionalDefault ...[]string) []string { + switch { + case v.IsStrSlice(): + return v.MustStrSlice() + case v.IsBoolSlice(): + slice := v.MustBoolSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatBool(iv) + } + return vals + case v.IsFloat32Slice(): + slice := v.MustFloat32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(float64(iv), 'f', -1, 32) + } + return vals + case v.IsFloat64Slice(): + slice := v.MustFloat64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(iv, 'f', -1, 64) + } + return vals + case v.IsIntSlice(): + slice := v.MustIntSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt8Slice(): + slice := v.MustInt8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt16Slice(): + slice := v.MustInt16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt32Slice(): + slice := v.MustInt32Slice() + vals := make([]string, len(slice)) + for i, iv := 
range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt64Slice(): + slice := v.MustInt64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(iv, 10) + } + return vals + case v.IsUintSlice(): + slice := v.MustUintSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint8Slice(): + slice := v.MustUint8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint16Slice(): + slice := v.MustUint16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint32Slice(): + slice := v.MustUint32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint64Slice(): + slice := v.MustUint64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(iv, 10) + } + return vals + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + + return []string{} +} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE index f38ec59..4b0421c 100644 --- a/vendor/github.com/stretchr/testify/LICENSE +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go new file mode 100644 index 0000000..41649d2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -0,0 +1,394 @@ +package assert + +import ( + "fmt" + "reflect" +) + +type CompareType int + +const ( + compareLess CompareType = iota - 1 + compareEqual + compareGreater +) + +var ( + intType = reflect.TypeOf(int(1)) + int8Type = reflect.TypeOf(int8(1)) + int16Type = reflect.TypeOf(int16(1)) + int32Type = reflect.TypeOf(int32(1)) + int64Type = reflect.TypeOf(int64(1)) + + uintType = reflect.TypeOf(uint(1)) + uint8Type = reflect.TypeOf(uint8(1)) + uint16Type = reflect.TypeOf(uint16(1)) + uint32Type = reflect.TypeOf(uint32(1)) + uint64Type = reflect.TypeOf(uint64(1)) + + float32Type = reflect.TypeOf(float32(1)) + float64Type = reflect.TypeOf(float64(1)) + + stringType = reflect.TypeOf("") +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + obj1Value := reflect.ValueOf(obj1) + obj2Value := reflect.ValueOf(obj2) + + // throughout this switch we try and avoid calling .Convert() if possible, + // as this has a pretty big performance impact + switch kind { + case reflect.Int: + { + intobj1, ok := obj1.(int) + if !ok { + intobj1 = obj1Value.Convert(intType).Interface().(int) + } + intobj2, ok := obj2.(int) + if !ok { + intobj2 = obj2Value.Convert(intType).Interface().(int) + } + if intobj1 > intobj2 { + return compareGreater, true + } + if intobj1 == intobj2 { + return compareEqual, true + } + if intobj1 < intobj2 { + return compareLess, true + } + } + case reflect.Int8: + { + int8obj1, ok := obj1.(int8) + if !ok { + int8obj1 = 
obj1Value.Convert(int8Type).Interface().(int8) + } + int8obj2, ok := obj2.(int8) + if !ok { + int8obj2 = obj2Value.Convert(int8Type).Interface().(int8) + } + if int8obj1 > int8obj2 { + return compareGreater, true + } + if int8obj1 == int8obj2 { + return compareEqual, true + } + if int8obj1 < int8obj2 { + return compareLess, true + } + } + case reflect.Int16: + { + int16obj1, ok := obj1.(int16) + if !ok { + int16obj1 = obj1Value.Convert(int16Type).Interface().(int16) + } + int16obj2, ok := obj2.(int16) + if !ok { + int16obj2 = obj2Value.Convert(int16Type).Interface().(int16) + } + if int16obj1 > int16obj2 { + return compareGreater, true + } + if int16obj1 == int16obj2 { + return compareEqual, true + } + if int16obj1 < int16obj2 { + return compareLess, true + } + } + case reflect.Int32: + { + int32obj1, ok := obj1.(int32) + if !ok { + int32obj1 = obj1Value.Convert(int32Type).Interface().(int32) + } + int32obj2, ok := obj2.(int32) + if !ok { + int32obj2 = obj2Value.Convert(int32Type).Interface().(int32) + } + if int32obj1 > int32obj2 { + return compareGreater, true + } + if int32obj1 == int32obj2 { + return compareEqual, true + } + if int32obj1 < int32obj2 { + return compareLess, true + } + } + case reflect.Int64: + { + int64obj1, ok := obj1.(int64) + if !ok { + int64obj1 = obj1Value.Convert(int64Type).Interface().(int64) + } + int64obj2, ok := obj2.(int64) + if !ok { + int64obj2 = obj2Value.Convert(int64Type).Interface().(int64) + } + if int64obj1 > int64obj2 { + return compareGreater, true + } + if int64obj1 == int64obj2 { + return compareEqual, true + } + if int64obj1 < int64obj2 { + return compareLess, true + } + } + case reflect.Uint: + { + uintobj1, ok := obj1.(uint) + if !ok { + uintobj1 = obj1Value.Convert(uintType).Interface().(uint) + } + uintobj2, ok := obj2.(uint) + if !ok { + uintobj2 = obj2Value.Convert(uintType).Interface().(uint) + } + if uintobj1 > uintobj2 { + return compareGreater, true + } + if uintobj1 == uintobj2 { + return compareEqual, true + } + if uintobj1 < uintobj2 { + return compareLess, true + } + } + case reflect.Uint8: + { + uint8obj1, ok := obj1.(uint8) + if !ok { + uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8) + } + uint8obj2, ok := obj2.(uint8) + if !ok { + uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8) + } + if uint8obj1 > uint8obj2 { + return compareGreater, true + } + if uint8obj1 == uint8obj2 { + return compareEqual, true + } + if uint8obj1 < uint8obj2 { + return compareLess, true + } + } + case reflect.Uint16: + { + uint16obj1, ok := obj1.(uint16) + if !ok { + uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16) + } + uint16obj2, ok := obj2.(uint16) + if !ok { + uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16) + } + if uint16obj1 > uint16obj2 { + return compareGreater, true + } + if uint16obj1 == uint16obj2 { + return compareEqual, true + } + if uint16obj1 < uint16obj2 { + return compareLess, true + } + } + case reflect.Uint32: + { + uint32obj1, ok := obj1.(uint32) + if !ok { + uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32) + } + uint32obj2, ok := obj2.(uint32) + if !ok { + uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32) + } + if uint32obj1 > uint32obj2 { + return compareGreater, true + } + if uint32obj1 == uint32obj2 { + return compareEqual, true + } + if uint32obj1 < uint32obj2 { + return compareLess, true + } + } + case reflect.Uint64: + { + uint64obj1, ok := obj1.(uint64) + if !ok { + uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64) + } + 
uint64obj2, ok := obj2.(uint64) + if !ok { + uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64) + } + if uint64obj1 > uint64obj2 { + return compareGreater, true + } + if uint64obj1 == uint64obj2 { + return compareEqual, true + } + if uint64obj1 < uint64obj2 { + return compareLess, true + } + } + case reflect.Float32: + { + float32obj1, ok := obj1.(float32) + if !ok { + float32obj1 = obj1Value.Convert(float32Type).Interface().(float32) + } + float32obj2, ok := obj2.(float32) + if !ok { + float32obj2 = obj2Value.Convert(float32Type).Interface().(float32) + } + if float32obj1 > float32obj2 { + return compareGreater, true + } + if float32obj1 == float32obj2 { + return compareEqual, true + } + if float32obj1 < float32obj2 { + return compareLess, true + } + } + case reflect.Float64: + { + float64obj1, ok := obj1.(float64) + if !ok { + float64obj1 = obj1Value.Convert(float64Type).Interface().(float64) + } + float64obj2, ok := obj2.(float64) + if !ok { + float64obj2 = obj2Value.Convert(float64Type).Interface().(float64) + } + if float64obj1 > float64obj2 { + return compareGreater, true + } + if float64obj1 == float64obj2 { + return compareEqual, true + } + if float64obj1 < float64obj2 { + return compareLess, true + } + } + case reflect.String: + { + stringobj1, ok := obj1.(string) + if !ok { + stringobj1 = obj1Value.Convert(stringType).Interface().(string) + } + stringobj2, ok := obj2.(string) + if !ok { + stringobj2 = obj2Value.Convert(stringType).Interface().(string) + } + if stringobj1 > stringobj2 { + return compareGreater, true + } + if stringobj1 == stringobj2 { + return compareEqual, true + } + if stringobj1 < stringobj2 { + return compareLess, true + } + } + } + + return compareEqual, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) +} + +// Positive asserts that the specified element is positive +// +// assert.Positive(t, 1) +// 
assert.Positive(t, 1.23) +func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) +} + +// Negative asserts that the specified element is negative +// +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) +func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) +} + +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + compareResult, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if !containsValue(allowedComparesResults, compareResult) { + return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + } + + return true +} + +func containsValue(values []CompareType, value CompareType) bool { + for _, v := range values { + if v == value { + return true + } + } + + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index e0364e9..4dfd122 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -32,7 +32,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args return Contains(t, s, contains, append([]interface{}{msg}, args...)...) } -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -92,7 +93,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -113,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) } +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) 
+} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // @@ -126,7 +145,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -160,7 +179,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { return False(t, value, append([]interface{}{msg}, args...)...) } -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -171,7 +191,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool // Greaterf asserts that the first element is greater than the second // // assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") // assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -223,7 +243,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -235,7 +255,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -243,6 +263,18 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) } +// HTTPStatusCodef asserts that a specified handler returns a specified status code. 
+// +// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...) +} + // HTTPSuccessf asserts that a specified handler returns a success status code. // // assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") @@ -257,7 +289,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -267,7 +299,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDeltaf asserts that the two numerals are within delta of each other. // -// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -307,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) } +// IsDecreasingf asserts that the collection is decreasing +// +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...) 
+} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -325,14 +405,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) } -// YAMLEqf asserts that two YAML strings are equivalent. -func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // @@ -347,7 +419,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Lessf asserts that the first element is less than the second // // assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") // assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -369,6 +441,28 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) } +// Negativef asserts that the specified element is negative +// +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") +func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Negative(t, e, append([]interface{}{msg}, args...)...) +} + +// Neverf asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") @@ -379,6 +473,15 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool return Nil(t, object, append([]interface{}{msg}, args...)...) } +// NoDirExistsf checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. 
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoDirExists(t, path, append([]interface{}{msg}, args...)...) +} + // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -392,6 +495,15 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { return NoError(t, err, append([]interface{}{msg}, args...)...) } +// NoFileExistsf checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoFileExists(t, path, append([]interface{}{msg}, args...)...) +} + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // @@ -431,6 +543,25 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) } +// NotEqualValuesf asserts that two objects are not equal even when converted to the same type +// +// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -453,7 +584,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -462,6 +593,19 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) } +// NotSamef asserts that two pointers do not reference the same object. +// +// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // NotSubsetf asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). 
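A rough usage sketch of the new ErrorIsf/NotErrorIsf wrappers introduced above; they forward to errors.Is, so a sentinel is matched anywhere in a wrapped chain. The names errNotFound and loadConfig are hypothetical, used only for illustration.

package example

import (
	"errors"
	"fmt"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

var errNotFound = errors.New("not found")

func loadConfig() error {
	// Wrap the sentinel so it sits one level down in the error chain.
	return fmt.Errorf("loading config: %w", errNotFound)
}

func TestLoadConfig(t *testing.T) {
	err := loadConfig()
	// errNotFound is in err's chain even though it is wrapped, so this passes.
	assert.ErrorIsf(t, err, errNotFound, "unexpected error from %s", "loadConfig")
	// io.EOF never appears in the chain, so this passes as well.
	assert.NotErrorIsf(t, err, io.EOF, "unexpected error from %s", "loadConfig")
}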
// @@ -491,6 +635,18 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool return Panics(t, f, append([]interface{}{msg}, args...)...) } +// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...) +} + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -502,9 +658,20 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) } +// Positivef asserts that the specified element is positive +// +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") +func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Positive(t, e, append([]interface{}{msg}, args...)...) +} + // Regexpf asserts that a specified regexp matches a string. // -// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -557,6 +724,14 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 2683040..25337a6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -53,7 +53,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, return Containsf(a.t, s, contains, msg, args...) } -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. 
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -61,7 +62,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { return DirExists(a.t, path, msgAndArgs...) } -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -167,7 +169,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -202,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { return Error(a.t, err, msgAndArgs...) } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAs(a.t, err, target, msgAndArgs...) +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAsf(a.t, err, target, msg, args...) +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIs(a.t, err, target, msgAndArgs...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIsf(a.t, err, target, msg, args...) +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -249,7 +287,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. // -// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -309,7 +347,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { return Falsef(a.t, value, msg, args...) } -// FileExists checks whether a file exists in the given path. 
It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -317,7 +356,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { return FileExists(a.t, path, msgAndArgs...) } -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -366,7 +406,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // // a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") // a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -443,7 +483,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // // a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -467,7 +507,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // // a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -475,6 +515,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } +// HTTPStatusCode asserts that a specified handler returns a specified status code. +// +// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...) +} + +// HTTPStatusCodef asserts that a specified handler returns a specified status code. 
+// +// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...) +} + // HTTPSuccess asserts that a specified handler returns a success status code. // // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) @@ -511,7 +575,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. // -// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -521,7 +585,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// a.InDelta(math.Pi, 22/7.0, 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -563,7 +627,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. // -// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -603,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } +// IsDecreasing asserts that the collection is decreasing +// +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) +func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(a.t, object, msgAndArgs...) +} + +// IsDecreasingf asserts that the collection is decreasing +// +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasingf(a.t, object, msg, args...) +} + +// IsIncreasing asserts that the collection is increasing +// +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) +func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(a.t, object, msgAndArgs...) 
+} + +// IsIncreasingf asserts that the collection is increasing +// +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasingf(a.t, object, msg, args...) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) +func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(a.t, object, msgAndArgs...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasingf(a.t, object, msg, args...) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) +func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(a.t, object, msgAndArgs...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasingf(a.t, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -639,22 +799,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. return JSONEqf(a.t, expected, actual, msg, args...) } -// YAMLEq asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEqf(a.t, expected, actual, msg, args...) -} - // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
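A minimal sketch of the ordering assertions added above. Note that the doc examples write []float as shorthand; an actual call needs a concrete element type such as []float64. The slices below are hypothetical sample data.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderingAssertions(t *testing.T) {
	// Strictly increasing: every element must be less than its successor.
	assert.IsIncreasing(t, []int{1, 2, 3})

	// Non-decreasing: equal neighbours are allowed.
	assert.IsNonDecreasing(t, []float64{1.0, 1.0, 2.5})

	// Strings are compared lexicographically.
	assert.IsDecreasing(t, []string{"c", "b", "a"})
}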
// @@ -718,7 +862,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // // a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf(float64(1), float64(2), "error message %s", "formatted") // a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -727,6 +871,50 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i return Lessf(a.t, e1, e2, msg, args...) } +// Negative asserts that the specified element is negative +// +// a.Negative(-1) +// a.Negative(-1.23) +func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negative(a.t, e, msgAndArgs...) +} + +// Negativef asserts that the specified element is negative +// +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") +func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negativef(a.t, e, msg, args...) +} + +// Never asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Never(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Neverf asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Neverf(a.t, condition, waitFor, tick, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -747,6 +935,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b return Nilf(a.t, object, msg, args...) } +// NoDirExists checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoDirExists(a.t, path, msgAndArgs...) +} + +// NoDirExistsf checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoDirExistsf(a.t, path, msg, args...) +} + // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -773,6 +979,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { return NoErrorf(a.t, err, msg, args...) } +// NoFileExists checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. 
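A short sketch of the polling assertions surfaced above: Never fails if the condition ever becomes true before waitFor elapses, while Eventually requires it to become true within that window. The leaked counter is hypothetical.

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNothingLeaks(t *testing.T) {
	// Hypothetical state standing in for whatever the real test observes.
	leaked := 0

	// The condition is checked every tick (10ms) for the whole waitFor window (100ms).
	assert.Never(t, func() bool { return leaked > 0 },
		100*time.Millisecond, 10*time.Millisecond)

	// The positive counterpart: the condition must hold at some point within waitFor.
	assert.Eventually(t, func() bool { return leaked == 0 },
		100*time.Millisecond, 10*time.Millisecond)
}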
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoFileExists(a.t, path, msgAndArgs...) +} + +// NoFileExistsf checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoFileExistsf(a.t, path, msg, args...) +} + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // @@ -838,6 +1062,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr return NotEqual(a.t, expected, actual, msgAndArgs...) } +// NotEqualValues asserts that two objects are not equal even when converted to the same type +// +// a.NotEqualValues(obj1, obj2) +func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqualValues(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualValuesf asserts that two objects are not equal even when converted to the same type +// +// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") +func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqualValuesf(a.t, expected, actual, msg, args...) +} + // NotEqualf asserts that the specified values are NOT equal. // // a.NotEqualf(obj1, obj2, "error message %s", "formatted") @@ -851,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(a.t, err, target, msgAndArgs...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIsf(a.t, err, target, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -904,7 +1166,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. // -// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -913,6 +1175,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg return NotRegexpf(a.t, rx, str, msg, args...) } +// NotSame asserts that two pointers do not reference the same object. +// +// a.NotSame(ptr1, ptr2) +// +// Both arguments must be pointer variables. 
Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSame(a.t, expected, actual, msgAndArgs...) +} + +// NotSamef asserts that two pointers do not reference the same object. +// +// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSamef(a.t, expected, actual, msg, args...) +} + // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -961,6 +1249,30 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return Panics(a.t, f, msgAndArgs...) } +// PanicsWithError asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// a.PanicsWithError("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithError(a.t, errString, f, msgAndArgs...) +} + +// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithErrorf(a.t, errString, f, msg, args...) +} + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -993,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b return Panicsf(a.t, f, msg, args...) } +// Positive asserts that the specified element is positive +// +// a.Positive(1) +// a.Positive(1.23) +func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positive(a.t, e, msgAndArgs...) +} + +// Positivef asserts that the specified element is positive +// +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") +func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positivef(a.t, e, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. // // a.Regexp(regexp.MustCompile("start"), "it's starting") @@ -1006,7 +1340,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. 
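A brief sketch of the pointer-identity assertions added above (Same/NotSame compare type and address, not contents). The user type and variables are hypothetical.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type user struct{ name string }

func TestPointerIdentity(t *testing.T) {
	first := &user{name: "ada"}
	second := &user{name: "ada"}

	// Equal values at different addresses: NotSame passes, Same would fail.
	assert.NotSame(t, first, second)
	assert.Same(t, first, first)

	// Equal compares values, not addresses, so it still passes here.
	assert.Equal(t, first, second)
}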
// -// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1103,6 +1437,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + // Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 15a486c..1c3b471 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -5,305 +5,77 @@ import ( "reflect" ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { - switch kind { - case reflect.Int: - { - intobj1 := obj1.(int) - intobj2 := obj2.(int) - if intobj1 > intobj2 { - return -1, true - } - if intobj1 == intobj2 { - return 0, true - } - if intobj1 < intobj2 { - return 1, true - } - } - case reflect.Int8: - { - int8obj1 := obj1.(int8) - int8obj2 := obj2.(int8) - if int8obj1 > int8obj2 { - return -1, true - } - if int8obj1 == int8obj2 { - return 0, true - } - if int8obj1 < int8obj2 { - return 1, true - } - } - case reflect.Int16: - { - int16obj1 := obj1.(int16) - int16obj2 := obj2.(int16) - if int16obj1 > int16obj2 { - return -1, true - } - if int16obj1 == int16obj2 { - return 0, true - } - if int16obj1 < int16obj2 { - return 1, true - } - } - case reflect.Int32: - { - int32obj1 := obj1.(int32) - int32obj2 := obj2.(int32) - if int32obj1 > int32obj2 { - return -1, true - } - if int32obj1 == int32obj2 { - return 0, true - } - if int32obj1 < int32obj2 { - return 1, true - } - } - case reflect.Int64: - { - int64obj1 := obj1.(int64) - int64obj2 := obj2.(int64) - if int64obj1 > int64obj2 { - return -1, true - } - if int64obj1 == int64obj2 { - return 0, true - } - if int64obj1 < int64obj2 { - return 1, true - } - } - case reflect.Uint: - { - uintobj1 := obj1.(uint) - uintobj2 := obj2.(uint) - if uintobj1 > uintobj2 { - return -1, true - } - if uintobj1 == uintobj2 { - return 0, true - } - if uintobj1 < uintobj2 { - return 1, true - } - } - case reflect.Uint8: - { - uint8obj1 := obj1.(uint8) - uint8obj2 := obj2.(uint8) - if uint8obj1 > uint8obj2 { - return -1, true - } - if uint8obj1 == uint8obj2 { - return 0, true - } - if uint8obj1 < uint8obj2 { - return 1, true - } - } - case reflect.Uint16: - { - uint16obj1 := obj1.(uint16) - uint16obj2 := obj2.(uint16) - if uint16obj1 > uint16obj2 { - return -1, true - } - if uint16obj1 == uint16obj2 { - return 0, true - } - if uint16obj1 < uint16obj2 { - return 
1, true - } - } - case reflect.Uint32: - { - uint32obj1 := obj1.(uint32) - uint32obj2 := obj2.(uint32) - if uint32obj1 > uint32obj2 { - return -1, true - } - if uint32obj1 == uint32obj2 { - return 0, true - } - if uint32obj1 < uint32obj2 { - return 1, true - } - } - case reflect.Uint64: - { - uint64obj1 := obj1.(uint64) - uint64obj2 := obj2.(uint64) - if uint64obj1 > uint64obj2 { - return -1, true - } - if uint64obj1 == uint64obj2 { - return 0, true - } - if uint64obj1 < uint64obj2 { - return 1, true - } - } - case reflect.Float32: - { - float32obj1 := obj1.(float32) - float32obj2 := obj2.(float32) - if float32obj1 > float32obj2 { - return -1, true - } - if float32obj1 == float32obj2 { - return 0, true - } - if float32obj1 < float32obj2 { - return 1, true - } - } - case reflect.Float64: - { - float64obj1 := obj1.(float64) - float64obj2 := obj2.(float64) - if float64obj1 > float64obj2 { - return -1, true - } - if float64obj1 == float64obj2 { - return 0, true - } - if float64obj1 < float64obj2 { - return 1, true - } - } - case reflect.String: - { - stringobj1 := obj1.(string) - stringobj2 := obj2.(string) - if stringobj1 > stringobj2 { - return -1, true - } - if stringobj1 == stringobj2 { - return 0, true - } - if stringobj1 < stringobj2 { - return 1, true - } - } - } - - return 0, false -} - -// Greater asserts that the first element is greater than the second -// -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") -func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() +// isOrdered checks that collection contains orderable elements. +func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { + objKind := reflect.TypeOf(object).Kind() + if objKind != reflect.Slice && objKind != reflect.Array { + return false } - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } + objValue := reflect.ValueOf(object) + objLen := objValue.Len() - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + if objLen <= 1 { + return true } - if res != -1 { - return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) - } + value := objValue.Index(0) + valueInterface := value.Interface() + firstValueKind := value.Kind() - return true -} + for i := 1; i < objLen; i++ { + prevValue := value + prevValueInterface := valueInterface -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") -func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } + value = objValue.Index(i) + valueInterface = value.Interface() - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) 
- } + compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + } - if res != -1 && res != 0 { - return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) + if !containsValue(allowedComparesResults, compareResult) { + return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...) + } } return true } -// Less asserts that the first element is less than the second +// IsIncreasing asserts that the collection is increasing // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") -func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if res != 1 { - return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) - } - - return true +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) +func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) } -// LessOrEqual asserts that the first element is less than or equal to the second +// IsNonIncreasing asserts that the collection is not increasing // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") -func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) +func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) +} - if res != 1 && res != 0 { - return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) 
- } +// IsDecreasing asserts that the collection is decreasing +// +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) +func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) +} - return true +// IsNonDecreasing asserts that the collection is not decreasing +// +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) +func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 044da8b..bcac440 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -11,6 +11,7 @@ import ( "reflect" "regexp" "runtime" + "runtime/debug" "strings" "time" "unicode" @@ -18,10 +19,10 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl +//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" // TestingT is an interface wrapper around *testing.T type TestingT interface { @@ -44,7 +45,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool -// Comparison a custom function that returns true on success and false on failure +// Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) /* @@ -103,11 +104,11 @@ the problem actually occurred in calling code.*/ // failed. func CallerInfo() []string { - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" + var pc uintptr + var ok bool + var file string + var line int + var name string callers := []string{} for i := 0; ; i++ { @@ -171,8 +172,8 @@ func isTest(name, prefix string) bool { if len(name) == len(prefix) { // "Test" is ok return true } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) + r, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(r) } func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { @@ -351,6 +352,19 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. +func validateEqualArgs(expected, actual interface{}) error { + if expected == nil && actual == nil { + return nil + } + + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + // Same asserts that two pointers reference the same object. 
// // assert.Same(t, ptr1, ptr2) @@ -362,18 +376,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) - if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { - return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) - } - - expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) - if expectedType != actualType { - return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", - expectedType, actualType), msgAndArgs...) - } - - if expected != actual { + if !samePointers(expected, actual) { return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -382,6 +385,42 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b return true } +// NotSame asserts that two pointers do not reference the same object. +// +// assert.NotSame(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if samePointers(expected, actual) { + return Fail(t, fmt.Sprintf( + "Expected and actual point to the same object: %p %#v", + expected, expected), msgAndArgs...) + } + return true +} + +// samePointers compares two generic interface objects and returns whether +// they point to the same object +func samePointers(first, second interface{}) bool { + firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) + if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { + return false + } + + firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) + if firstType != secondType { + return false + } + + // compare pointer addresses + return first == second +} + // formatUnequalValues takes two values of arbitrary types and returns string // representations appropriate to be presented to the user. // @@ -390,12 +429,27 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%#v)", expected, expected), - fmt.Sprintf("%T(%#v)", actual, actual) + return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)), + fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual)) + } + switch expected.(type) { + case time.Duration: + return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual) } + return truncatingFormat(expected), truncatingFormat(actual) +} - return fmt.Sprintf("%#v", expected), - fmt.Sprintf("%#v", actual) +// truncatingFormat formats the data and truncates it if it's too long. +// +// This helps keep formatted error messages lines from exceeding the +// bufio.MaxScanTokenSize max line length that the go testing framework imposes. +func truncatingFormat(data interface{}) string { + value := fmt.Sprintf("%#v", data) + max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed. + if len(value) > max { + value = value[0:max] + "<... 
truncated>" + } + return value } // EqualValues asserts that two objects are equal or convertable to the same types @@ -442,12 +496,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } if !isNil(object) { return true } + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, "Expected value not to be nil.", msgAndArgs...) } @@ -488,12 +542,12 @@ func isNil(object interface{}) bool { // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } if isNil(object) { return true } + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) } @@ -530,12 +584,11 @@ func isEmpty(object interface{}) bool { // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - pass := isEmpty(object) if !pass { + if h, ok := t.(tHelper); ok { + h.Helper() + } Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) } @@ -550,12 +603,11 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - pass := !isEmpty(object) if !pass { + if h, ok := t.(tHelper); ok { + h.Helper() + } Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) } @@ -598,16 +650,10 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if h, ok := t.(interface { - Helper() - }); ok { - h.Helper() - } - - if value != true { + if !value { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, "Should be true", msgAndArgs...) } @@ -619,11 +665,10 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if value != false { + if value { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, "Should be false", msgAndArgs...) } @@ -654,6 +699,21 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } +// NotEqualValues asserts that two objects are not equal even when converted to the same type +// +// assert.NotEqualValues(t, obj1, obj2) +func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if ObjectsAreEqualValues(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true +} + // containsElement try loop over the list check if the list includes the element. // return (false, false) if impossible. // return (true, false) if element was not found. @@ -706,10 +766,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo ok, found := includeElement(s, contains) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) 
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...) } return true @@ -840,27 +900,39 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface return true } - aKind := reflect.TypeOf(listA).Kind() - bKind := reflect.TypeOf(listB).Kind() + if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) { + return false + } + + extraA, extraB := diffLists(listA, listB) - if aKind != reflect.Array && aKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + if len(extraA) == 0 && len(extraB) == 0 { + return true } - if bKind != reflect.Array && bKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) + return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...) +} + +// isList checks that the provided value is array or slice. +func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) { + kind := reflect.TypeOf(list).Kind() + if kind != reflect.Array && kind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind), + msgAndArgs...) } + return true +} +// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B. +// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and +// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored. +func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { aValue := reflect.ValueOf(listA) bValue := reflect.ValueOf(listB) aLen := aValue.Len() bLen := bValue.Len() - if aLen != bLen { - return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) - } - // Mark indexes in bValue that we already used visited := make([]bool, bLen) for i := 0; i < aLen; i++ { @@ -877,11 +949,38 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface } } if !found { - return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + extraA = append(extraA, element) } } - return true + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + extraB = append(extraB, bValue.Index(j).Interface()) + } + + return +} + +func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string { + var msg bytes.Buffer + + msg.WriteString("elements differ") + if len(extraA) > 0 { + msg.WriteString("\n\nextra elements in list A:\n") + msg.WriteString(spewConfig.Sdump(extraA)) + } + if len(extraB) > 0 { + msg.WriteString("\n\nextra elements in list B:\n") + msg.WriteString(spewConfig.Sdump(extraB)) + } + msg.WriteString("\n\nlistA:\n") + msg.WriteString(spewConfig.Sdump(listA)) + msg.WriteString("\n\nlistB:\n") + msg.WriteString(spewConfig.Sdump(listB)) + + return msg.String() } // Condition uses a Comparison to assert a complex condition. @@ -901,15 +1000,17 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. 
-func didPanic(f PanicTestFunc) (bool, interface{}) { +func didPanic(f PanicTestFunc) (bool, interface{}, string) { didPanic := false var message interface{} + var stack string func() { defer func() { if message = recover(); message != nil { didPanic = true + stack = string(debug.Stack()) } }() @@ -918,7 +1019,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) { }() - return didPanic, message + return didPanic, message, stack } @@ -930,7 +1031,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { h.Helper() } - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic { return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) } @@ -946,12 +1047,34 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr h.Helper() } - funcDidPanic, panicValue := didPanic(f) + funcDidPanic, panicValue, panickedStack := didPanic(f) if !funcDidPanic { return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) } if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...) + } + + return true +} + +// PanicsWithError asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + funcDidPanic, panicValue, panickedStack := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + panicErr, ok := panicValue.(error) + if !ok || panicErr.Error() != errString { + return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...) } return true @@ -965,8 +1088,8 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { h.Helper() } - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) + if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...) } return true @@ -993,6 +1116,8 @@ func toFloat(x interface{}) (float64, bool) { xok := true switch xn := x.(type) { + case uint: + xf = float64(xn) case uint8: xf = float64(xn) case uint16: @@ -1014,7 +1139,7 @@ func toFloat(x interface{}) (float64, bool) { case float32: xf = float64(xn) case float64: - xf = float64(xn) + xf = xn case time.Duration: xf = float64(xn) default: @@ -1026,7 +1151,7 @@ func toFloat(x interface{}) (float64, bool) { // InDelta asserts that the two numerals are within delta of each other. 
// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// assert.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1128,6 +1253,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if !aok { return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) } + if math.IsNaN(af) { + return 0, errors.New("expected value must not be NaN") + } if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } @@ -1135,6 +1263,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if !bok { return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) } + if math.IsNaN(bf) { + return 0, errors.New("actual value must not be NaN") + } return math.Abs(af-bf) / math.Abs(af), nil } @@ -1144,6 +1275,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if h, ok := t.(tHelper); ok { h.Helper() } + if math.IsNaN(epsilon) { + return Fail(t, "epsilon must not be NaN") + } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { return Fail(t, err.Error(), msgAndArgs...) @@ -1191,10 +1325,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } if err != nil { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) } @@ -1208,11 +1342,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err == nil { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, "An error is expected but got nil.", msgAndArgs...) } @@ -1314,7 +1447,8 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { return true } -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1332,7 +1466,24 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { return true } -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// NoFileExists checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + return true + } + if info.IsDir() { + return true + } + return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...) +} + +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. 
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1350,6 +1501,25 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { return true } +// NoDirExists checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return true + } + return true + } + if !info.IsDir() { + return true + } + return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...) +} + // JSONEq asserts that two JSON strings are equivalent. // // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) @@ -1439,15 +1609,6 @@ func diff(expected interface{}, actual interface{}) string { return "\n\nDiff:\n" + diff } -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. -func validateEqualArgs(expected, actual interface{}) error { - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - func isFunction(arg interface{}) bool { if arg == nil { return false @@ -1460,6 +1621,8 @@ var spewConfig = spew.ConfigState{ DisablePointerAddresses: true, DisableCapacities: true, SortKeys: true, + DisableMethods: true, + MaxDepth: 10, } type tHelper interface { @@ -1475,24 +1638,137 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t h.Helper() } + ch := make(chan bool, 1) + timer := time.NewTimer(waitFor) - ticker := time.NewTicker(tick) - checkPassed := make(chan bool) defer timer.Stop() + + ticker := time.NewTicker(tick) defer ticker.Stop() - defer close(checkPassed) - for { + + for tick := ticker.C; ; { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case result := <-checkPassed: - if result { + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { return true } - case <-ticker.C: - go func() { - checkPassed <- condition() - }() + tick = ticker.C + } + } +} + +// Never asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + for tick := ticker.C; ; { + select { + case <-timer.C: + return true + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { + return Fail(t, "Condition satisfied", msgAndArgs...) + } + tick = ticker.C } } } + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. 
+func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ + "expected: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ + "expected: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + +func buildErrorChainString(err error) string { + if err == nil { + return "" + } + + e := errors.Unwrap(err) + chain := fmt.Sprintf("%q", err.Error()) + for e != nil { + chain += fmt.Sprintf("\n\t%q", e.Error()) + e = errors.Unwrap(e) + } + return chain +} diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go index 9ad5685..df189d2 100644 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -13,4 +13,4 @@ func New(t TestingT) *Assertions { } } -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs +//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index df46fa7..4ed341d 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -33,7 +33,6 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent @@ -56,7 +55,6 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect @@ -79,7 +77,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url 
string, values code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false } isErrorCode := code >= http.StatusBadRequest @@ -90,6 +87,28 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values return isErrorCode } +// HTTPStatusCode asserts that a specified handler returns a specified status code. +// +// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + } + + successful := code == statuscode + if !successful { + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + } + + return successful +} + // HTTPBody is a helper that returns HTTP body of the response. It returns // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index b5288af..e2e6a2d 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -65,6 +65,11 @@ type Call struct { // reference. It's useful when mocking methods such as unmarshalers or // decoders. RunFn func(Arguments) + + // PanicMsg holds msg to be used to mock panic on the function call + // if the PanicMsg is set to a non nil string the function call will panic + // irrespective of other settings + PanicMsg *string } func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { @@ -77,6 +82,7 @@ func newCall(parent *Mock, methodName string, callerInfo []string, methodArgumen Repeatability: 0, WaitFor: nil, RunFn: nil, + PanicMsg: nil, } } @@ -100,6 +106,18 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } +// Panic specifies if the functon call should fail and the panic message +// +// Mock.On("DoSomething").Panic("test panic") +func (c *Call) Panic(msg string) *Call { + c.lock() + defer c.unlock() + + c.PanicMsg = &msg + + return c +} + // Once indicates that that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() @@ -147,10 +165,10 @@ func (c *Call) After(d time.Duration) *Call { } // Run sets a handler to be called before returning. 
It can be used when -// mocking a method such as unmarshalers that takes a pointer to a struct and +// mocking a method (such as an unmarshaler) that takes a pointer to a struct and // sets properties in such struct // -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { // arg := args.Get(0).(*map[string]interface{}) // arg["foo"] = "bar" // }) @@ -279,25 +297,52 @@ func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, * return -1, expectedCall } +type matchCandidate struct { + call *Call + mismatch string + diffCount int +} + +func (c matchCandidate) isBetterMatchThan(other matchCandidate) bool { + if c.call == nil { + return false + } + if other.call == nil { + return true + } + + if c.diffCount > other.diffCount { + return false + } + if c.diffCount < other.diffCount { + return true + } + + if c.call.Repeatability > 0 && other.call.Repeatability <= 0 { + return true + } + return false +} + func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { - var diffCount int - var closestCall *Call - var err string + var bestMatch matchCandidate for _, call := range m.expectedCalls() { if call.Method == method { errInfo, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = call - err = errInfo + tempCandidate := matchCandidate{ + call: call, + mismatch: errInfo, + diffCount: tempDiffCount, + } + if tempCandidate.isBetterMatchThan(bestMatch) { + bestMatch = tempCandidate } - } } - return closestCall, err + return bestMatch.call, bestMatch.mismatch } func callString(method string, arguments Arguments, includeArgumentValues bool) string { @@ -392,6 +437,13 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen time.Sleep(call.waitTime) } + m.mutex.Lock() + panicMsg := call.PanicMsg + m.mutex.Unlock() + if panicMsg != nil { + panic(*panicMsg) + } + m.mutex.Lock() runFn := call.RunFn m.mutex.Unlock() @@ -527,6 +579,45 @@ func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...inter return true } +// IsMethodCallable checking that the method can be called +// If the method was called more than `Repeatability` return false +func (m *Mock) IsMethodCallable(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + + for _, v := range m.ExpectedCalls { + if v.Method != methodName { + continue + } + if len(arguments) != len(v.Arguments) { + continue + } + if v.Repeatability < v.totalCalls { + continue + } + if isArgsEqual(v.Arguments, arguments) { + return true + } + } + return false +} + +// isArgsEqual compares arguments +func isArgsEqual(expected Arguments, args []interface{}) bool { + if len(expected) != len(args) { + return false + } + for i, v := range args { + if !reflect.DeepEqual(expected[i], v) { + return false + } + } + return true +} + func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { for _, call := range m.calls() { if call.Method == methodName { @@ -578,6 +669,23 @@ func AnythingOfType(t string) AnythingOfTypeArgument { return AnythingOfTypeArgument(t) } +// IsTypeArgument is a struct that contains the type of an argument +// for use when type checking. This is an alternative to AnythingOfType. 
+// Used in Diff and Assert. +type IsTypeArgument struct { + t interface{} +} + +// IsType returns an IsTypeArgument object containing the type to check for. +// You can provide a zero-value of the type to check. This is an +// alternative to AnythingOfType. Used in Diff and Assert. +// +// For example: +// Assert(t, IsType(""), IsType(0)) +func IsType(t interface{}) *IsTypeArgument { + return &IsTypeArgument{t: t} +} + // argumentMatcher performs custom argument matching, returning whether or // not the argument is matched by the expectation fixture function. type argumentMatcher struct { @@ -711,6 +819,12 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { + t := expected.(*IsTypeArgument).t + if reflect.TypeOf(t) != reflect.TypeOf(actual) { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) + } } else { // normal checking @@ -768,7 +882,7 @@ func (args Arguments) String(indexOrNil ...int) string { // normal String() method - return a string representation of the args var argsStr []string for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + argsStr = append(argsStr, fmt.Sprintf("%T", arg)) // handles nil nicely } return strings.Join(argsStr, ",") } else if len(indexOrNil) == 1 { diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go index ac71d40..1dcb233 100644 --- a/vendor/github.com/stretchr/testify/require/forward_requirements.go +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -13,4 +13,4 @@ func New(t TestingT) *Assertions { } } -//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs +//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index c5903f5..51820df 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -66,7 +66,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args t.FailNow() } -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -77,7 +78,8 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { t.FailNow() } -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. 
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -210,7 +212,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -254,6 +256,54 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) { t.FailNow() } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorIs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorIsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -275,12 +325,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // // assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { - if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { + return + } t.FailNow() } @@ -289,12 +339,12 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // // assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { - if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { + return + } t.FailNow() } @@ -313,7 +363,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. 
// Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -394,7 +444,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) { t.FailNow() } -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -405,7 +456,8 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { t.FailNow() } -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -466,7 +518,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // // assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") // assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -561,7 +613,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // // assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -591,7 +643,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // // assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -602,6 +654,36 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri t.FailNow() } +// HTTPStatusCode asserts that a specified handler returns a specified status code. +// +// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPStatusCode(t, handler, method, url, values, statuscode, msgAndArgs...) { + return + } + t.FailNow() +} + +// HTTPStatusCodef asserts that a specified handler returns a specified status code. +// +// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPStatusCodef(t, handler, method, url, values, statuscode, msg, args...) { + return + } + t.FailNow() +} + // HTTPSuccess asserts that a specified handler returns a success status code. // // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) @@ -647,7 +729,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -660,7 +742,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// assert.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -717,7 +799,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. // -// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -772,71 +854,169 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl t.FailNow() } -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { +// IsDecreasing asserts that the collection is decreasing +// +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) +func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.IsType(t, expectedType, object, msgAndArgs...) { + if assert.IsDecreasing(t, object, msgAndArgs...) { return } t.FailNow() } -// IsTypef asserts that the specified objects are of the same type. 
-func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { +// IsDecreasingf asserts that the collection is decreasing +// +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.IsTypef(t, expectedType, object, msg, args...) { + if assert.IsDecreasingf(t, object, msg, args...) { return } t.FailNow() } -// JSONEq asserts that two JSON strings are equivalent. +// IsIncreasing asserts that the collection is increasing // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) +func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.JSONEq(t, expected, actual, msgAndArgs...) { + if assert.IsIncreasing(t, object, msgAndArgs...) { return } t.FailNow() } -// JSONEqf asserts that two JSON strings are equivalent. +// IsIncreasingf asserts that the collection is increasing // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.JSONEqf(t, expected, actual, msg, args...) { + if assert.IsIncreasingf(t, object, msg, args...) { return } t.FailNow() } -// YAMLEq asserts that two YAML strings are equivalent. -func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { +// IsNonDecreasing asserts that the collection is not decreasing +// +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) +func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.YAMLEq(t, expected, actual, msgAndArgs...) { + if assert.IsNonDecreasing(t, object, msgAndArgs...) { return } t.FailNow() } -// YAMLEqf asserts that two YAML strings are equivalent. -func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { +// IsNonDecreasingf asserts that the collection is not decreasing +// +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.YAMLEqf(t, expected, actual, msg, args...) { + if assert.IsNonDecreasingf(t, object, msg, args...) 
{ + return + } + t.FailNow() +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) +func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNonIncreasing(t, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNonIncreasingf(t, object, msg, args...) { + return + } + t.FailNow() +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsType(t, expectedType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsTypef(t, expectedType, object, msg, args...) { + return + } + t.FailNow() +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEqf(t, expected, actual, msg, args...) { return } t.FailNow() @@ -920,7 +1100,7 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . // Lessf asserts that the first element is less than the second // // assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") // assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -932,6 +1112,62 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter t.FailNow() } +// Negative asserts that the specified element is negative +// +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) +func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Negative(t, e, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// Negativef asserts that the specified element is negative +// +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") +func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Negativef(t, e, msg, args...) { + return + } + t.FailNow() +} + +// Never asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Never(t, condition, waitFor, tick, msgAndArgs...) { + return + } + t.FailNow() +} + +// Neverf asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Neverf(t, condition, waitFor, tick, msg, args...) { + return + } + t.FailNow() +} + // Nil asserts that the specified object is nil. // // assert.Nil(t, err) @@ -958,6 +1194,30 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { t.FailNow() } +// NoDirExists checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NoDirExists(t, path, msgAndArgs...) { + return + } + t.FailNow() +} + +// NoDirExistsf checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NoDirExistsf(t, path, msg, args...) { + return + } + t.FailNow() +} + // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -990,6 +1250,30 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { t.FailNow() } +// NoFileExists checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NoFileExists(t, path, msgAndArgs...) { + return + } + t.FailNow() +} + +// NoFileExistsf checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NoFileExistsf(t, path, msg, args...) { + return + } + t.FailNow() +} + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // @@ -1070,6 +1354,32 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
t.FailNow() } +// NotEqualValues asserts that two objects are not equal even when converted to the same type +// +// assert.NotEqualValues(t, obj1, obj2) +func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotEqualValues(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotEqualValuesf asserts that two objects are not equal even when converted to the same type +// +// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotEqualValuesf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + // NotEqualf asserts that the specified values are NOT equal. // // assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") @@ -1086,6 +1396,30 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorIs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorIsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1154,7 +1488,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1166,6 +1500,38 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. t.FailNow() } +// NotSame asserts that two pointers do not reference the same object. +// +// assert.NotSame(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotSame(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotSamef asserts that two pointers do not reference the same object. +// +// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotSamef(t, expected, actual, msg, args...) 
{ + return + } + t.FailNow() +} + // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -1229,6 +1595,36 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { t.FailNow() } +// PanicsWithError asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.PanicsWithError(t, errString, f, msgAndArgs...) { + return + } + t.FailNow() +} + +// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.PanicsWithErrorf(t, errString, f, msg, args...) { + return + } + t.FailNow() +} + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -1270,6 +1666,34 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} t.FailNow() } +// Positive asserts that the specified element is positive +// +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) +func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Positive(t, e, msgAndArgs...) { + return + } + t.FailNow() +} + +// Positivef asserts that the specified element is positive +// +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") +func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Positivef(t, e, msg, args...) { + return + } + t.FailNow() +} + // Regexp asserts that a specified regexp matches a string. // // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") @@ -1286,7 +1710,7 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. // -// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1410,6 +1834,28 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// YAMLEqf asserts that two YAML strings are equivalent. 
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + // Zero asserts that i is the zero value for its type. func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 804fae0..ed54a9d 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -54,7 +54,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, Containsf(a.t, s, contains, msg, args...) } -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -62,7 +63,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { DirExists(a.t, path, msgAndArgs...) } -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -168,7 +170,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -203,6 +205,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { Error(a.t, err, msgAndArgs...) } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorAs(a.t, err, target, msgAndArgs...) +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorAsf(a.t, err, target, msg, args...) +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorIs(a.t, err, target, msgAndArgs...) 
+} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorIsf(a.t, err, target, msg, args...) +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -250,7 +288,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. // -// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -310,7 +348,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { Falsef(a.t, value, msg, args...) } -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -318,7 +357,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { FileExists(a.t, path, msgAndArgs...) } -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -367,7 +407,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // // a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") // a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -444,7 +484,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // // a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -468,7 +508,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // // a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +// Returns whether the assertion was successful (true) or not (false). 
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -476,6 +516,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } +// HTTPStatusCode asserts that a specified handler returns a specified status code. +// +// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...) +} + +// HTTPStatusCodef asserts that a specified handler returns a specified status code. +// +// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...) +} + // HTTPSuccess asserts that a specified handler returns a success status code. // // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) @@ -512,7 +576,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. // -// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -522,7 +586,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// a.InDelta(math.Pi, 22/7.0, 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -564,7 +628,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. // -// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -604,6 +668,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } +// IsDecreasing asserts that the collection is decreasing +// +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) +func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsDecreasing(a.t, object, msgAndArgs...) 
+} + +// IsDecreasingf asserts that the collection is decreasing +// +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsDecreasingf(a.t, object, msg, args...) +} + +// IsIncreasing asserts that the collection is increasing +// +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) +func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsIncreasing(a.t, object, msgAndArgs...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsIncreasingf(a.t, object, msg, args...) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) +func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonDecreasing(a.t, object, msgAndArgs...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonDecreasingf(a.t, object, msg, args...) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) +func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonIncreasing(a.t, object, msgAndArgs...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonIncreasingf(a.t, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -640,22 +800,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. JSONEqf(a.t, expected, actual, msg, args...) } -// YAMLEq asserts that two YAML strings are equivalent. 
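// --- Illustrative sketch (not part of the vendored diff): a minimal test
// showing how the ordering helpers forwarded above (IsIncreasing,
// IsNonDecreasing, IsDecreasing, IsNonIncreasing) might be called through a
// require.Assertions value. The package name, test name, and sample slices
// are hypothetical.
package plugin_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestOrderingHelpers(t *testing.T) {
	a := require.New(t)

	a.IsIncreasing([]int{1, 2, 3})     // strictly increasing
	a.IsNonDecreasing([]int{1, 1, 2})  // repeats allowed
	a.IsDecreasing([]string{"b", "a"}) // lexicographic order for strings
	a.IsNonIncreasing([]float64{2, 2, 1}, "unexpected ordering")
}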
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - YAMLEqf(a.t, expected, actual, msg, args...) -} - // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -719,7 +863,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // // a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf(float64(1), float64(2), "error message %s", "formatted") // a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -728,6 +872,50 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i Lessf(a.t, e1, e2, msg, args...) } +// Negative asserts that the specified element is negative +// +// a.Negative(-1) +// a.Negative(-1.23) +func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Negative(a.t, e, msgAndArgs...) +} + +// Negativef asserts that the specified element is negative +// +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") +func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Negativef(a.t, e, msg, args...) +} + +// Never asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Never(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Neverf asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Neverf(a.t, condition, waitFor, tick, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -748,6 +936,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { Nilf(a.t, object, msg, args...) } +// NoDirExists checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NoDirExists(a.t, path, msgAndArgs...) +} + +// NoDirExistsf checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. 
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NoDirExistsf(a.t, path, msg, args...) +} + // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -774,6 +980,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { NoErrorf(a.t, err, msg, args...) } +// NoFileExists checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NoFileExists(a.t, path, msgAndArgs...) +} + +// NoFileExistsf checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NoFileExistsf(a.t, path, msg, args...) +} + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // @@ -839,6 +1063,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr NotEqual(a.t, expected, actual, msgAndArgs...) } +// NotEqualValues asserts that two objects are not equal even when converted to the same type +// +// a.NotEqualValues(obj1, obj2) +func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotEqualValues(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualValuesf asserts that two objects are not equal even when converted to the same type +// +// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") +func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotEqualValuesf(a.t, expected, actual, msg, args...) +} + // NotEqualf asserts that the specified values are NOT equal. // // a.NotEqualf(obj1, obj2, "error message %s", "formatted") @@ -852,6 +1096,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorIs(a.t, err, target, msgAndArgs...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorIsf(a.t, err, target, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -905,7 +1167,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. 
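// --- Illustrative sketch (not part of the vendored diff): the error-chain
// helpers added in this file (ErrorIs, ErrorAs, NotErrorIs) are thin
// forwards over the standard errors.Is / errors.As. The missing-file error
// below is just a convenient example of a wrapped error chain.
package plugin_test

import (
	"io/fs"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorChainHelpers(t *testing.T) {
	a := require.New(t)

	_, err := os.Open("this-file-does-not-exist")
	a.ErrorIs(err, fs.ErrNotExist)      // the chain contains fs.ErrNotExist
	a.NotErrorIs(err, fs.ErrPermission) // but not fs.ErrPermission

	var pathErr *fs.PathError
	a.ErrorAs(err, &pathErr) // and an *fs.PathError can be extracted from it
}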
// -// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -914,6 +1176,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg NotRegexpf(a.t, rx, str, msg, args...) } +// NotSame asserts that two pointers do not reference the same object. +// +// a.NotSame(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotSame(a.t, expected, actual, msgAndArgs...) +} + +// NotSamef asserts that two pointers do not reference the same object. +// +// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotSamef(a.t, expected, actual, msg, args...) +} + // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -962,6 +1250,30 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { Panics(a.t, f, msgAndArgs...) } +// PanicsWithError asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// a.PanicsWithError("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + PanicsWithError(a.t, errString, f, msgAndArgs...) +} + +// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + PanicsWithErrorf(a.t, errString, f, msg, args...) +} + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -994,6 +1306,28 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa Panicsf(a.t, f, msg, args...) } +// Positive asserts that the specified element is positive +// +// a.Positive(1) +// a.Positive(1.23) +func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Positive(a.t, e, msgAndArgs...) 
+} + +// Positivef asserts that the specified element is positive +// +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") +func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Positivef(a.t, e, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. // // a.Regexp(regexp.MustCompile("start"), "it's starting") @@ -1007,7 +1341,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. // -// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1104,6 +1438,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + // Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 6b85c5e..91772df 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -26,4 +26,4 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) -//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs +//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE new file mode 100644 index 0000000..14d6042 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go new file mode 100644 index 0000000..6c6bb37 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go @@ -0,0 +1,24 @@ +// +build linux,!appengine + +package msgp + +import ( + "os" + "syscall" +) + +func adviseRead(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) +} + +func adviseWrite(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) +} + +func fallocate(f *os.File, sz int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) + if err == syscall.ENOTSUP { + return f.Truncate(sz) + } + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go new file mode 100644 index 0000000..da65ea5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go @@ -0,0 +1,17 @@ +// +build !linux appengine + +package msgp + +import ( + "os" +) + +// TODO: darwin, BSD support + +func adviseRead(mem []byte) {} + +func adviseWrite(mem []byte) {} + +func fallocate(f *os.File, sz int64) error { + return f.Truncate(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go new file mode 100644 index 0000000..a0434c7 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/circular.go @@ -0,0 +1,39 @@ +package msgp + +type timer interface { + StartTimer() + StopTimer() +} + +// EndlessReader is an io.Reader +// that loops over the same data +// endlessly. It is used for benchmarking. +type EndlessReader struct { + tb timer + data []byte + offset int +} + +// NewEndlessReader returns a new endless reader +func NewEndlessReader(b []byte, tb timer) *EndlessReader { + return &EndlessReader{tb: tb, data: b, offset: 0} +} + +// Read implements io.Reader. In practice, it +// always returns (len(p), nil), although it +// fills the supplied slice while the benchmark +// timer is stopped. +func (c *EndlessReader) Read(p []byte) (int, error) { + c.tb.StopTimer() + var n int + l := len(p) + m := len(c.data) + for n < l { + nn := copy(p[n:], c.data[c.offset:]) + n += nn + c.offset += nn + c.offset %= m + } + c.tb.StartTimer() + return n, nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go new file mode 100644 index 0000000..c634eef --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -0,0 +1,142 @@ +// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). +// +// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack +// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code +// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. +// +// This package defines four "families" of functions: +// - AppendXxxx() appends an object to a []byte in MessagePack encoding. 
+// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. +// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. +// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. +// +// Once a type has satisfied the `Encodable` and `Decodable` interfaces, +// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using +// msgp.Encode(io.Writer, msgp.Encodable) +// and +// msgp.Decode(io.Reader, msgp.Decodable) +// +// There are also methods for converting MessagePack to JSON without +// an explicit de-serialization step. +// +// For additional tips, tricks, and gotchas, please visit +// the wiki at http://github.com/tinylib/msgp +package msgp + +const last4 = 0x0f +const first4 = 0xf0 +const last5 = 0x1f +const first3 = 0xe0 +const last7 = 0x7f + +func isfixint(b byte) bool { + return b>>7 == 0 +} + +func isnfixint(b byte) bool { + return b&first3 == mnfixint +} + +func isfixmap(b byte) bool { + return b&first4 == mfixmap +} + +func isfixarray(b byte) bool { + return b&first4 == mfixarray +} + +func isfixstr(b byte) bool { + return b&first3 == mfixstr +} + +func wfixint(u uint8) byte { + return u & last7 +} + +func rfixint(b byte) uint8 { + return b +} + +func wnfixint(i int8) byte { + return byte(i) | mnfixint +} + +func rnfixint(b byte) int8 { + return int8(b) +} + +func rfixmap(b byte) uint8 { + return b & last4 +} + +func wfixmap(u uint8) byte { + return mfixmap | (u & last4) +} + +func rfixstr(b byte) uint8 { + return b & last5 +} + +func wfixstr(u uint8) byte { + return (u & last5) | mfixstr +} + +func rfixarray(b byte) uint8 { + return (b & last4) +} + +func wfixarray(u uint8) byte { + return (u & last4) | mfixarray +} + +// These are all the byte +// prefixes defined by the +// msgpack standard +const ( + // 0XXXXXXX + mfixint uint8 = 0x00 + + // 111XXXXX + mnfixint uint8 = 0xe0 + + // 1000XXXX + mfixmap uint8 = 0x80 + + // 1001XXXX + mfixarray uint8 = 0x90 + + // 101XXXXX + mfixstr uint8 = 0xa0 + + mnil uint8 = 0xc0 + mfalse uint8 = 0xc2 + mtrue uint8 = 0xc3 + mbin8 uint8 = 0xc4 + mbin16 uint8 = 0xc5 + mbin32 uint8 = 0xc6 + mext8 uint8 = 0xc7 + mext16 uint8 = 0xc8 + mext32 uint8 = 0xc9 + mfloat32 uint8 = 0xca + mfloat64 uint8 = 0xcb + muint8 uint8 = 0xcc + muint16 uint8 = 0xcd + muint32 uint8 = 0xce + muint64 uint8 = 0xcf + mint8 uint8 = 0xd0 + mint16 uint8 = 0xd1 + mint32 uint8 = 0xd2 + mint64 uint8 = 0xd3 + mfixext1 uint8 = 0xd4 + mfixext2 uint8 = 0xd5 + mfixext4 uint8 = 0xd6 + mfixext8 uint8 = 0xd7 + mfixext16 uint8 = 0xd8 + mstr8 uint8 = 0xd9 + mstr16 uint8 = 0xda + mstr32 uint8 = 0xdb + marray16 uint8 = 0xdc + marray32 uint8 = 0xdd + mmap16 uint8 = 0xde + mmap32 uint8 = 0xdf +) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go new file mode 100644 index 0000000..b473a6f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit.go @@ -0,0 +1,242 @@ +package msgp + +import ( + "math" +) + +// Locate returns a []byte pointing to the field +// in a messagepack map with the provided key. (The returned []byte +// points to a sub-slice of 'raw'; Locate does no allocations.) If the +// key doesn't exist in the map, a zero-length []byte will be returned. +func Locate(key string, raw []byte) []byte { + s, n := locate(raw, key) + return raw[s:n] +} + +// Replace takes a key ("key") in a messagepack map ("raw") +// and replaces its value with the one provided and returns +// the new []byte. The returned []byte may point to the same +// memory as "raw". 
Replace makes no effort to evaluate the validity +// of the contents of 'val'. It may use up to the full capacity of 'raw.' +// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' +// is not a map. +func Replace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, true) +} + +// CopyReplace works similarly to Replace except that the returned +// byte slice does not point to the same memory as 'raw'. CopyReplace +// returns 'nil' if the field doesn't exist or 'raw' isn't a map. +func CopyReplace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, false) +} + +// Remove removes a key-value pair from 'raw'. It returns +// 'raw' unchanged if the key didn't exist. +func Remove(key string, raw []byte) []byte { + start, end := locateKV(raw, key) + if start == end { + return raw + } + raw = raw[:start+copy(raw[start:], raw[end:])] + return resizeMap(raw, -1) +} + +// HasKey returns whether the map in 'raw' has +// a field with key 'key' +func HasKey(key string, raw []byte) bool { + sz, bts, err := ReadMapHeaderBytes(raw) + if err != nil { + return false + } + var field []byte + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return false + } + if UnsafeString(field) == key { + return true + } + } + return false +} + +func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { + ll := end - start // length of segment to replace + lv := len(val) + + if inplace { + extra := lv - ll + + // fastest case: we're doing + // a 1:1 replacement + if extra == 0 { + copy(raw[start:], val) + return raw + + } else if extra < 0 { + // 'val' smaller than replaced value + // copy in place and shift back + + x := copy(raw[start:], val) + y := copy(raw[start+x:], raw[end:]) + return raw[:start+x+y] + + } else if extra < cap(raw)-len(raw) { + // 'val' less than (cap-len) extra bytes + // copy in place and shift forward + raw = raw[0 : len(raw)+extra] + // shift end forward + copy(raw[end+extra:], raw[end:]) + copy(raw[start:], val) + return raw + } + } + + // we have to allocate new space + out := make([]byte, len(raw)+len(val)-ll) + x := copy(out, raw[:start]) + y := copy(out[x:], val) + copy(out[x+y:], raw[end:]) + return out +} + +// locate does a naive O(n) search for the map key; returns start, end +// (returns 0,0 on error) +func locate(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return + } + + // loop and locate field + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + // start location + l := len(raw) + start = l - len(bts) + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = l - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// locate key AND value +func locateKV(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return 0, 0 + } + + for i := uint32(0); i < sz; i++ { + tmp := len(bts) + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + start = len(raw) - tmp + 
bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = len(raw) - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// delta is delta on map size +func resizeMap(raw []byte, delta int64) []byte { + var sz int64 + switch raw[0] { + case mmap16: + sz = int64(big.Uint16(raw[1:])) + if sz+delta <= math.MaxUint16 { + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[5:], raw[3:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[3:]...) + + case mmap32: + sz = int64(big.Uint32(raw[1:])) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + + default: + sz = int64(rfixmap(raw[0])) + if sz+delta < 16 { + raw[0] = wfixmap(uint8(sz + delta)) + return raw + } else if sz+delta <= math.MaxUint16 { + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[3:], raw[1:]) + raw[0] = mmap16 + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } + if cap(raw)-len(raw) >= 4 { + raw = raw[0 : len(raw)+4] + copy(raw[5:], raw[1:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go new file mode 100644 index 0000000..95762e7 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go @@ -0,0 +1,99 @@ +package msgp + +// size of every object on the wire, +// plus type information. gives us +// constant-time type information +// for traversing composite objects. 
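// --- Illustrative sketch (not part of the vendored diff): one way the raw-map
// helpers above (HasKey, Locate, Replace) might be used. The map is built with
// the package's Append* family and read back with Read*Bytes; the field name
// and values are arbitrary.
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode the one-field map {"count": 1} without any intermediate struct.
	raw := msgp.AppendMapHeader(nil, 1)
	raw = msgp.AppendString(raw, "count")
	raw = msgp.AppendInt(raw, 1)

	fmt.Println(msgp.HasKey("count", raw)) // true

	// Locate returns a sub-slice of 'raw' that holds the encoded value.
	n, _, err := msgp.ReadIntBytes(msgp.Locate("count", raw))
	fmt.Println(n, err) // 1 <nil>

	// Replace swaps in a new encoded value (it returns nil if the key is missing).
	raw = msgp.Replace("count", raw, msgp.AppendInt(nil, 42))
	n, _, _ = msgp.ReadIntBytes(msgp.Locate("count", raw))
	fmt.Println(n) // 42
}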
+// +var sizes = [256]bytespec{ + mnil: {size: 1, extra: constsize, typ: NilType}, + mfalse: {size: 1, extra: constsize, typ: BoolType}, + mtrue: {size: 1, extra: constsize, typ: BoolType}, + mbin8: {size: 2, extra: extra8, typ: BinType}, + mbin16: {size: 3, extra: extra16, typ: BinType}, + mbin32: {size: 5, extra: extra32, typ: BinType}, + mext8: {size: 3, extra: extra8, typ: ExtensionType}, + mext16: {size: 4, extra: extra16, typ: ExtensionType}, + mext32: {size: 6, extra: extra32, typ: ExtensionType}, + mfloat32: {size: 5, extra: constsize, typ: Float32Type}, + mfloat64: {size: 9, extra: constsize, typ: Float64Type}, + muint8: {size: 2, extra: constsize, typ: UintType}, + muint16: {size: 3, extra: constsize, typ: UintType}, + muint32: {size: 5, extra: constsize, typ: UintType}, + muint64: {size: 9, extra: constsize, typ: UintType}, + mint8: {size: 2, extra: constsize, typ: IntType}, + mint16: {size: 3, extra: constsize, typ: IntType}, + mint32: {size: 5, extra: constsize, typ: IntType}, + mint64: {size: 9, extra: constsize, typ: IntType}, + mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, + mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, + mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, + mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, + mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, + mstr8: {size: 2, extra: extra8, typ: StrType}, + mstr16: {size: 3, extra: extra16, typ: StrType}, + mstr32: {size: 5, extra: extra32, typ: StrType}, + marray16: {size: 3, extra: array16v, typ: ArrayType}, + marray32: {size: 5, extra: array32v, typ: ArrayType}, + mmap16: {size: 3, extra: map16v, typ: MapType}, + mmap32: {size: 5, extra: map32v, typ: MapType}, +} + +func init() { + // set up fixed fields + + // fixint + for i := mfixint; i < 0x80; i++ { + sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // nfixint + for i := uint16(mnfixint); i < 0x100; i++ { + sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // fixstr gets constsize, + // since the prefix yields the size + for i := mfixstr; i < 0xc0; i++ { + sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} + } + + // fixmap + for i := mfixmap; i < 0x90; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} + } + + // fixarray + for i := mfixarray; i < 0xa0; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} + } +} + +// a valid bytespsec has +// non-zero 'size' and +// non-zero 'typ' +type bytespec struct { + size uint8 // prefix size information + extra varmode // extra size information + typ Type // type + _ byte // makes bytespec 4 bytes (yes, this matters) +} + +// size mode +// if positive, # elements for composites +type varmode int8 + +const ( + constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) + extra8 = -1 // has uint8(p[1]) extra bytes + extra16 = -2 // has be16(p[1:]) extra bytes + extra32 = -3 // has be32(p[1:]) extra bytes + map16v = -4 // use map16 + map32v = -5 // use map32 + array16v = -6 // use array16 + array32v = -7 // use array32 +) + +func getType(v byte) Type { + return sizes[v].typ +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go new file mode 100644 index 0000000..921e855 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -0,0 +1,317 @@ +package msgp + +import ( + "fmt" + "reflect" +) + +const resumableDefault = false + +var ( + // 
ErrShortBytes is returned when the + // slice being decoded is too short to + // contain the contents of the message + ErrShortBytes error = errShort{} + + // this error is only returned + // if we reach code that should + // be unreachable + fatal error = errFatal{} +) + +// Error is the interface satisfied +// by all of the errors that originate +// from this package. +type Error interface { + error + + // Resumable returns whether + // or not the error means that + // the stream of data is malformed + // and the information is unrecoverable. + Resumable() bool +} + +// contextError allows msgp Error instances to be enhanced with additional +// context about their origin. +type contextError interface { + Error + + // withContext must not modify the error instance - it must clone and + // return a new error with the context added. + withContext(ctx string) error +} + +// Cause returns the underlying cause of an error that has been wrapped +// with additional context. +func Cause(e error) error { + out := e + if e, ok := e.(errWrapped); ok && e.cause != nil { + out = e.cause + } + return out +} + +// Resumable returns whether or not the error means that the stream of data is +// malformed and the information is unrecoverable. +func Resumable(e error) bool { + if e, ok := e.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// WrapError wraps an error with additional context that allows the part of the +// serialized type that caused the problem to be identified. Underlying errors +// can be retrieved using Cause() +// +// The input error is not modified - a new error should be returned. +// +// ErrShortBytes is not wrapped with any context due to backward compatibility +// issues with the public API. +// +func WrapError(err error, ctx ...interface{}) error { + switch e := err.(type) { + case errShort: + return e + case contextError: + return e.withContext(ctxString(ctx)) + default: + return errWrapped{cause: err, ctx: ctxString(ctx)} + } +} + +// ctxString converts the incoming interface{} slice into a single string. +func ctxString(ctx []interface{}) string { + out := "" + for idx, cv := range ctx { + if idx > 0 { + out += "/" + } + out += fmt.Sprintf("%v", cv) + } + return out +} + +func addCtx(ctx, add string) string { + if ctx != "" { + return add + "/" + ctx + } else { + return add + } +} + +// errWrapped allows arbitrary errors passed to WrapError to be enhanced with +// context and unwrapped with Cause() +type errWrapped struct { + cause error + ctx string +} + +func (e errWrapped) Error() string { + if e.ctx != "" { + return fmt.Sprintf("%s at %s", e.cause, e.ctx) + } else { + return e.cause.Error() + } +} + +func (e errWrapped) Resumable() bool { + if e, ok := e.cause.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// Unwrap returns the cause. 
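// --- Illustrative sketch (not part of the vendored diff): attaching context
// to a decode error with WrapError and recovering the original cause with
// Cause, as defined above. The base error and the "Config"/"Timeout" labels
// are stand-ins for whatever a generated UnmarshalMsg would report.
package main

import (
	"errors"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	base := errors.New("bad field")

	wrapped := msgp.WrapError(base, "Config", "Timeout")
	fmt.Println(wrapped)                     // bad field at Config/Timeout
	fmt.Println(msgp.Cause(wrapped) == base) // true

	// A foreign (non-msgp) cause falls back to the package default,
	// which treats the stream as unrecoverable.
	fmt.Println(msgp.Resumable(wrapped)) // false
}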
+func (e errWrapped) Unwrap() error { return e.cause } + +type errShort struct{} + +func (e errShort) Error() string { return "msgp: too few bytes left to read object" } +func (e errShort) Resumable() bool { return false } + +type errFatal struct { + ctx string +} + +func (f errFatal) Error() string { + out := "msgp: fatal decoding error (unreachable code)" + if f.ctx != "" { + out += " at " + f.ctx + } + return out +} + +func (f errFatal) Resumable() bool { return false } + +func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } + +// ArrayError is an error returned +// when decoding a fix-sized array +// of the wrong size +type ArrayError struct { + Wanted uint32 + Got uint32 + ctx string +} + +// Error implements the error interface +func (a ArrayError) Error() string { + out := fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got) + if a.ctx != "" { + out += " at " + a.ctx + } + return out +} + +// Resumable is always 'true' for ArrayErrors +func (a ArrayError) Resumable() bool { return true } + +func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a } + +// IntOverflow is returned when a call +// would downcast an integer to a type +// with too few bits to hold its value. +type IntOverflow struct { + Value int64 // the value of the integer + FailedBitsize int // the bit size that the int64 could not fit into + ctx string +} + +// Error implements the error interface +func (i IntOverflow) Error() string { + str := fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) + if i.ctx != "" { + str += " at " + i.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (i IntOverflow) Resumable() bool { return true } + +func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i } + +// UintOverflow is returned when a call +// would downcast an unsigned integer to a type +// with too few bits to hold its value +type UintOverflow struct { + Value uint64 // value of the uint + FailedBitsize int // the bit size that couldn't fit the value + ctx string +} + +// Error implements the error interface +func (u UintOverflow) Error() string { + str := fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintOverflow) Resumable() bool { return true } + +func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } + +// UintBelowZero is returned when a call +// would cast a signed integer below zero +// to an unsigned integer. +type UintBelowZero struct { + Value int64 // value of the incoming int + ctx string +} + +// Error implements the error interface +func (u UintBelowZero) Error() string { + str := fmt.Sprintf("msgp: attempted to cast int %d to unsigned", u.Value) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintBelowZero) Resumable() bool { return true } + +func (u UintBelowZero) withContext(ctx string) error { + u.ctx = ctx + return u +} + +// A TypeError is returned when a particular +// decoding method is unsuitable for decoding +// a particular MessagePack value. 
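// --- Illustrative sketch (not part of the vendored diff): what the concrete
// error types in this file look like when decoding goes wrong. The Append*/
// Read*Bytes helpers come from elsewhere in the package, and the printed
// messages are approximate.
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Reading an int out of a buffer that actually holds a string yields a
	// TypeError, which is resumable (the stream itself is well formed).
	buf := msgp.AppendString(nil, "not a number")
	_, _, err := msgp.ReadIntBytes(buf)
	fmt.Println(err)                 // e.g. msgp: attempted to decode type "str" with method for "int"
	fmt.Println(msgp.Resumable(err)) // true

	// Downcasting a value that does not fit the requested width yields an
	// IntOverflow, which is also resumable.
	buf = msgp.AppendInt(nil, 300)
	_, _, err = msgp.ReadInt8Bytes(buf)
	fmt.Println(err) // e.g. msgp: 300 overflows int8
}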
+type TypeError struct { + Method Type // Type expected by method + Encoded Type // Type actually encoded + + ctx string +} + +// Error implements the error interface +func (t TypeError) Error() string { + out := fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) + if t.ctx != "" { + out += " at " + t.ctx + } + return out +} + +// Resumable returns 'true' for TypeErrors +func (t TypeError) Resumable() bool { return true } + +func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t } + +// returns either InvalidPrefixError or +// TypeError depending on whether or not +// the prefix is recognized +func badPrefix(want Type, lead byte) error { + t := sizes[lead].typ + if t == InvalidType { + return InvalidPrefixError(lead) + } + return TypeError{Method: want, Encoded: t} +} + +// InvalidPrefixError is returned when a bad encoding +// uses a prefix that is not recognized in the MessagePack standard. +// This kind of error is unrecoverable. +type InvalidPrefixError byte + +// Error implements the error interface +func (i InvalidPrefixError) Error() string { + return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i)) +} + +// Resumable returns 'false' for InvalidPrefixErrors +func (i InvalidPrefixError) Resumable() bool { return false } + +// ErrUnsupportedType is returned +// when a bad argument is supplied +// to a function that takes `interface{}`. +type ErrUnsupportedType struct { + T reflect.Type + + ctx string +} + +// Error implements error +func (e *ErrUnsupportedType) Error() string { + out := fmt.Sprintf("msgp: type %q not supported", e.T) + if e.ctx != "" { + out += " at " + e.ctx + } + return out +} + +// Resumable returns 'true' for ErrUnsupportedType +func (e *ErrUnsupportedType) Resumable() bool { return true } + +func (e *ErrUnsupportedType) withContext(ctx string) error { + o := *e + o.ctx = addCtx(o.ctx, ctx) + return &o +} diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go new file mode 100644 index 0000000..b2e1108 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/extension.go @@ -0,0 +1,549 @@ +package msgp + +import ( + "fmt" + "math" +) + +const ( + // Complex64Extension is the extension number used for complex64 + Complex64Extension = 3 + + // Complex128Extension is the extension number used for complex128 + Complex128Extension = 4 + + // TimeExtension is the extension number used for time.Time + TimeExtension = 5 +) + +// our extensions live here +var extensionReg = make(map[int8]func() Extension) + +// RegisterExtension registers extensions so that they +// can be initialized and returned by methods that +// decode `interface{}` values. This should only +// be called during initialization. f() should return +// a newly-initialized zero value of the extension. Keep in +// mind that extensions 3, 4, and 5 are reserved for +// complex64, complex128, and time.Time, respectively, +// and that MessagePack reserves extension types from -127 to -1. +// +// For example, if you wanted to register a user-defined struct: +// +// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) +// +// RegisterExtension will panic if you call it multiple times +// with the same 'typ' argument, or if you use a reserved +// type (3, 4, or 5). 
+func RegisterExtension(typ int8, f func() Extension) { + switch typ { + case Complex64Extension, Complex128Extension, TimeExtension: + panic(fmt.Sprint("msgp: forbidden extension type:", typ)) + } + if _, ok := extensionReg[typ]; ok { + panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once")) + } + extensionReg[typ] = f +} + +// ExtensionTypeError is an error type returned +// when there is a mis-match between an extension type +// and the type encoded on the wire +type ExtensionTypeError struct { + Got int8 + Want int8 +} + +// Error implements the error interface +func (e ExtensionTypeError) Error() string { + return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got) +} + +// Resumable returns 'true' for ExtensionTypeErrors +func (e ExtensionTypeError) Resumable() bool { return true } + +func errExt(got int8, wanted int8) error { + return ExtensionTypeError{Got: got, Want: wanted} +} + +// Extension is the interface fulfilled +// by types that want to define their +// own binary encoding. +type Extension interface { + // ExtensionType should return + // a int8 that identifies the concrete + // type of the extension. (Types <0 are + // officially reserved by the MessagePack + // specifications.) + ExtensionType() int8 + + // Len should return the length + // of the data to be encoded + Len() int + + // MarshalBinaryTo should copy + // the data into the supplied slice, + // assuming that the slice has length Len() + MarshalBinaryTo([]byte) error + + UnmarshalBinary([]byte) error +} + +// RawExtension implements the Extension interface +type RawExtension struct { + Data []byte + Type int8 +} + +// ExtensionType implements Extension.ExtensionType, and returns r.Type +func (r *RawExtension) ExtensionType() int8 { return r.Type } + +// Len implements Extension.Len, and returns len(r.Data) +func (r *RawExtension) Len() int { return len(r.Data) } + +// MarshalBinaryTo implements Extension.MarshalBinaryTo, +// and returns a copy of r.Data +func (r *RawExtension) MarshalBinaryTo(d []byte) error { + copy(d, r.Data) + return nil +} + +// UnmarshalBinary implements Extension.UnmarshalBinary, +// and sets r.Data to the contents of the provided slice +func (r *RawExtension) UnmarshalBinary(b []byte) error { + if cap(r.Data) >= len(b) { + r.Data = r.Data[0:len(b)] + } else { + r.Data = make([]byte, len(b)) + } + copy(r.Data, b) + return nil +} + +// WriteExtension writes an extension type to the writer +func (mw *Writer) WriteExtension(e Extension) error { + l := e.Len() + var err error + switch l { + case 0: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 0 + mw.buf[o+2] = byte(e.ExtensionType()) + case 1: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext1 + mw.buf[o+1] = byte(e.ExtensionType()) + case 2: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext2 + mw.buf[o+1] = byte(e.ExtensionType()) + case 4: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(e.ExtensionType()) + case 8: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(e.ExtensionType()) + case 16: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = byte(e.ExtensionType()) + default: + switch { + case l < math.MaxUint8: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + 
mw.buf[o+1] = byte(uint8(l)) + mw.buf[o+2] = byte(e.ExtensionType()) + case l < math.MaxUint16: + o, err := mw.require(4) + if err != nil { + return err + } + mw.buf[o] = mext16 + big.PutUint16(mw.buf[o+1:], uint16(l)) + mw.buf[o+3] = byte(e.ExtensionType()) + default: + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mext32 + big.PutUint32(mw.buf[o+1:], uint32(l)) + mw.buf[o+5] = byte(e.ExtensionType()) + } + } + // we can only write directly to the + // buffer if we're sure that it + // fits the object + if l <= mw.bufsize() { + o, err := mw.require(l) + if err != nil { + return err + } + return e.MarshalBinaryTo(mw.buf[o:]) + } + // here we create a new buffer + // just large enough for the body + // and save it as the write buffer + err = mw.flush() + if err != nil { + return err + } + buf := make([]byte, l) + err = e.MarshalBinaryTo(buf) + if err != nil { + return err + } + mw.buf = buf + mw.wloc = l + return nil +} + +// peek at the extension type, assuming the next +// kind to be read is Extension +func (m *Reader) peekExtensionType() (int8, error) { + p, err := m.R.Peek(2) + if err != nil { + return 0, err + } + spec := sizes[p[0]] + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, p[0]) + } + if spec.extra == constsize { + return int8(p[1]), nil + } + size := spec.size + p, err = m.R.Peek(int(size)) + if err != nil { + return 0, err + } + return int8(p[size-1]), nil +} + +// peekExtension peeks at the extension encoding type +// (must guarantee at least 1 byte in 'b') +func peekExtension(b []byte) (int8, error) { + spec := sizes[b[0]] + size := spec.size + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, b[0]) + } + if len(b) < int(size) { + return 0, ErrShortBytes + } + // for fixed extensions, + // the type information is in + // the second byte + if spec.extra == constsize { + return int8(b[1]), nil + } + // otherwise, it's in the last + // part of the prefix + return int8(b[size-1]), nil +} + +// ReadExtension reads the next object from the reader +// as an extension. ReadExtension will fail if the next +// object in the stream is not an extension, or if +// e.Type() is not the same as the wire type. 
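// --- Illustrative sketch (not part of the vendored diff): round-tripping a
// user-defined extension through the byte-oriented API using RawExtension,
// AppendExtension, and ReadExtensionBytes from this file. The type number 10
// and the payload are arbitrary sample values.
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	in := &msgp.RawExtension{Type: 10, Data: []byte("hello")}

	buf, err := msgp.AppendExtension(nil, in)
	if err != nil {
		panic(err)
	}

	// The reader must expect the same extension type, or an
	// ExtensionTypeError is returned.
	out := &msgp.RawExtension{Type: 10}
	rest, err := msgp.ReadExtensionBytes(buf, out)
	fmt.Println(string(out.Data), len(rest), err) // hello 0 <nil>
}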
+func (m *Reader) ReadExtension(e Extension) (err error) { + var p []byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead := p[0] + var read int + var off int + switch lead { + case mfixext1: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(3) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(3) + } + return + + case mfixext2: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(4) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(4) + } + return + + case mfixext4: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(6) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(6) + } + return + + case mfixext8: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(10) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(10) + } + return + + case mfixext16: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(18) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(18) + } + return + + case mext8: + p, err = m.R.Peek(3) + if err != nil { + return + } + if int8(p[2]) != e.ExtensionType() { + err = errExt(int8(p[2]), e.ExtensionType()) + return + } + read = int(uint8(p[1])) + off = 3 + + case mext16: + p, err = m.R.Peek(4) + if err != nil { + return + } + if int8(p[3]) != e.ExtensionType() { + err = errExt(int8(p[3]), e.ExtensionType()) + return + } + read = int(big.Uint16(p[1:])) + off = 4 + + case mext32: + p, err = m.R.Peek(6) + if err != nil { + return + } + if int8(p[5]) != e.ExtensionType() { + err = errExt(int8(p[5]), e.ExtensionType()) + return + } + read = int(big.Uint32(p[1:])) + off = 6 + + default: + err = badPrefix(ExtensionType, lead) + return + } + + p, err = m.R.Peek(read + off) + if err != nil { + return + } + err = e.UnmarshalBinary(p[off:]) + if err == nil { + _, err = m.R.Skip(read + off) + } + return +} + +// AppendExtension appends a MessagePack extension to the provided slice +func AppendExtension(b []byte, e Extension) ([]byte, error) { + l := e.Len() + var o []byte + var n int + switch l { + case 0: + o, n = ensure(b, 3) + o[n] = mext8 + o[n+1] = 0 + o[n+2] = byte(e.ExtensionType()) + return o[:n+3], nil + case 1: + o, n = ensure(b, 3) + o[n] = mfixext1 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 2: + o, n = ensure(b, 4) + o[n] = mfixext2 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 4: + o, n = ensure(b, 6) + o[n] = mfixext4 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 8: + o, n = ensure(b, 10) + o[n] = mfixext8 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 16: + o, n = ensure(b, 18) + o[n] = mfixext16 + o[n+1] = byte(e.ExtensionType()) + n += 2 + default: + switch { + case l < math.MaxUint8: + o, n = ensure(b, l+3) + o[n] = mext8 + o[n+1] = byte(uint8(l)) + o[n+2] = byte(e.ExtensionType()) + n += 3 + case l < math.MaxUint16: + o, n = ensure(b, l+4) + o[n] = mext16 + big.PutUint16(o[n+1:], uint16(l)) + o[n+3] = byte(e.ExtensionType()) + n += 4 + default: + o, n = ensure(b, l+6) + o[n] = mext32 + big.PutUint32(o[n+1:], uint32(l)) + o[n+5] = byte(e.ExtensionType()) + n 
+= 6 + } + } + return o, e.MarshalBinaryTo(o[n:]) +} + +// ReadExtensionBytes reads an extension from 'b' into 'e' +// and returns any remaining bytes. +// Possible errors: +// - ErrShortBytes ('b' not long enough) +// - ExtensionTypeError{} (wire type not the same as e.Type()) +// - TypeError{} (next object not an extension) +// - InvalidPrefixError +// - An umarshal error returned from e.UnmarshalBinary +func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { + l := len(b) + if l < 3 { + return b, ErrShortBytes + } + lead := b[0] + var ( + sz int // size of 'data' + off int // offset of 'data' + typ int8 + ) + switch lead { + case mfixext1: + typ = int8(b[1]) + sz = 1 + off = 2 + case mfixext2: + typ = int8(b[1]) + sz = 2 + off = 2 + case mfixext4: + typ = int8(b[1]) + sz = 4 + off = 2 + case mfixext8: + typ = int8(b[1]) + sz = 8 + off = 2 + case mfixext16: + typ = int8(b[1]) + sz = 16 + off = 2 + case mext8: + sz = int(uint8(b[1])) + typ = int8(b[2]) + off = 3 + if sz == 0 { + return b[3:], e.UnmarshalBinary(b[3:3]) + } + case mext16: + if l < 4 { + return b, ErrShortBytes + } + sz = int(big.Uint16(b[1:])) + typ = int8(b[3]) + off = 4 + case mext32: + if l < 6 { + return b, ErrShortBytes + } + sz = int(big.Uint32(b[1:])) + typ = int8(b[5]) + off = 6 + default: + return b, badPrefix(ExtensionType, lead) + } + + if typ != e.ExtensionType() { + return b, errExt(typ, e.ExtensionType()) + } + + // the data of the extension starts + // at 'off' and is 'sz' bytes long + if len(b[off:]) < sz { + return b, ErrShortBytes + } + tot := off + sz + return b[tot:], e.UnmarshalBinary(b[off:tot]) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go new file mode 100644 index 0000000..8e7370e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file.go @@ -0,0 +1,92 @@ +// +build linux darwin dragonfly freebsd netbsd openbsd +// +build !appengine + +package msgp + +import ( + "os" + "syscall" +) + +// ReadFile reads a file into 'dst' using +// a read-only memory mapping. Consequently, +// the file must be mmap-able, and the +// Unmarshaler should never write to +// the source memory. (Methods generated +// by the msgp tool obey that constraint, but +// user-defined implementations may not.) +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +// +func ReadFile(dst Unmarshaler, file *os.File) error { + stat, err := file.Stat() + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseRead(data) + _, err = dst.UnmarshalMsg(data) + uerr := syscall.Munmap(data) + if err == nil { + err = uerr + } + return err +} + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +// WriteFile writes a file from 'src' using +// memory mapping. It overwrites the entire +// contents of the previous file. +// The mapping size is calculated +// using the `Msgsize()` method +// of 'src', so it must produce a result +// equal to or greater than the actual encoded +// size of the object. Otherwise, +// a fault (SIGBUS) will occur. +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. 
+// +// NOTE: The performance of this call +// is highly OS- and filesystem-dependent. +// Users should take care to test that this +// performs as expected in a production environment. +// (Linux users should run a kernel and filesystem +// that support fallocate(2) for the best results.) +func WriteFile(src MarshalSizer, file *os.File) error { + sz := src.Msgsize() + err := fallocate(file, int64(sz)) + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseWrite(data) + chunk := data[:0] + chunk, err = src.MarshalMsg(chunk) + if err != nil { + return err + } + uerr := syscall.Munmap(data) + if uerr != nil { + return uerr + } + return file.Truncate(int64(len(chunk))) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go new file mode 100644 index 0000000..6e654db --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -0,0 +1,47 @@ +// +build windows appengine + +package msgp + +import ( + "io/ioutil" + "os" +) + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +func ReadFile(dst Unmarshaler, file *os.File) error { + if u, ok := dst.(Decodable); ok { + return u.DecodeMsg(NewReader(file)) + } + + data, err := ioutil.ReadAll(file) + if err != nil { + return err + } + _, err = dst.UnmarshalMsg(data) + return err +} + +func WriteFile(src MarshalSizer, file *os.File) error { + if e, ok := src.(Encodable); ok { + w := NewWriter(file) + err := e.EncodeMsg(w) + if err == nil { + err = w.Flush() + } + return err + } + + raw, err := src.MarshalMsg(nil) + if err != nil { + return err + } + _, err = file.Write(raw) + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go new file mode 100644 index 0000000..f817d77 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -0,0 +1,174 @@ +package msgp + +/* ---------------------------------- + integer encoding utilities + (inline-able) + + TODO(tinylib): there are faster, + albeit non-portable solutions + to the code below. implement + byteswap? 
+ ---------------------------------- */ + +func putMint64(b []byte, i int64) { + b[0] = mint64 + b[1] = byte(i >> 56) + b[2] = byte(i >> 48) + b[3] = byte(i >> 40) + b[4] = byte(i >> 32) + b[5] = byte(i >> 24) + b[6] = byte(i >> 16) + b[7] = byte(i >> 8) + b[8] = byte(i) +} + +func getMint64(b []byte) int64 { + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | + (int64(b[3]) << 40) | (int64(b[4]) << 32) | + (int64(b[5]) << 24) | (int64(b[6]) << 16) | + (int64(b[7]) << 8) | (int64(b[8])) +} + +func putMint32(b []byte, i int32) { + b[0] = mint32 + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) +} + +func getMint32(b []byte) int32 { + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) +} + +func putMint16(b []byte, i int16) { + b[0] = mint16 + b[1] = byte(i >> 8) + b[2] = byte(i) +} + +func getMint16(b []byte) (i int16) { + return (int16(b[1]) << 8) | int16(b[2]) +} + +func putMint8(b []byte, i int8) { + b[0] = mint8 + b[1] = byte(i) +} + +func getMint8(b []byte) (i int8) { + return int8(b[1]) +} + +func putMuint64(b []byte, u uint64) { + b[0] = muint64 + b[1] = byte(u >> 56) + b[2] = byte(u >> 48) + b[3] = byte(u >> 40) + b[4] = byte(u >> 32) + b[5] = byte(u >> 24) + b[6] = byte(u >> 16) + b[7] = byte(u >> 8) + b[8] = byte(u) +} + +func getMuint64(b []byte) uint64 { + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | + (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | + (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | + (uint64(b[7]) << 8) | (uint64(b[8])) +} + +func putMuint32(b []byte, u uint32) { + b[0] = muint32 + b[1] = byte(u >> 24) + b[2] = byte(u >> 16) + b[3] = byte(u >> 8) + b[4] = byte(u) +} + +func getMuint32(b []byte) uint32 { + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) +} + +func putMuint16(b []byte, u uint16) { + b[0] = muint16 + b[1] = byte(u >> 8) + b[2] = byte(u) +} + +func getMuint16(b []byte) uint16 { + return (uint16(b[1]) << 8) | uint16(b[2]) +} + +func putMuint8(b []byte, u uint8) { + b[0] = muint8 + b[1] = byte(u) +} + +func getMuint8(b []byte) uint8 { + return uint8(b[1]) +} + +func getUnix(b []byte) (sec int64, nsec int32) { + sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | + (int64(b[2]) << 40) | (int64(b[3]) << 32) | + (int64(b[4]) << 24) | (int64(b[5]) << 16) | + (int64(b[6]) << 8) | (int64(b[7])) + + nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) + return +} + +func putUnix(b []byte, sec int64, nsec int32) { + b[0] = byte(sec >> 56) + b[1] = byte(sec >> 48) + b[2] = byte(sec >> 40) + b[3] = byte(sec >> 32) + b[4] = byte(sec >> 24) + b[5] = byte(sec >> 16) + b[6] = byte(sec >> 8) + b[7] = byte(sec) + b[8] = byte(nsec >> 24) + b[9] = byte(nsec >> 16) + b[10] = byte(nsec >> 8) + b[11] = byte(nsec) +} + +/* ----------------------------- + prefix utilities + ----------------------------- */ + +// write prefix and uint8 +func prefixu8(b []byte, pre byte, sz uint8) { + b[0] = pre + b[1] = byte(sz) +} + +// write prefix and big-endian uint16 +func prefixu16(b []byte, pre byte, sz uint16) { + b[0] = pre + b[1] = byte(sz >> 8) + b[2] = byte(sz) +} + +// write prefix and big-endian uint32 +func prefixu32(b []byte, pre byte, sz uint32) { + b[0] = pre + b[1] = byte(sz >> 24) + b[2] = byte(sz >> 16) + b[3] = byte(sz >> 8) + b[4] = byte(sz) +} + +func prefixu64(b []byte, pre byte, sz uint64) { + b[0] = pre + b[1] = byte(sz >> 56) + b[2] = byte(sz >> 48) + b[3] = byte(sz >> 40) + b[4] = byte(sz >> 32) + b[5] = byte(sz >> 
24) + b[6] = byte(sz >> 16) + b[7] = byte(sz >> 8) + b[8] = byte(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go new file mode 100644 index 0000000..0e11e60 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -0,0 +1,568 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "unicode/utf8" +) + +var ( + null = []byte("null") + hex = []byte("0123456789abcdef") +) + +var defuns [_maxtype]func(jsWriter, *Reader) (int, error) + +// note: there is an initialization loop if +// this isn't set up during init() +func init() { + // since none of these functions are inline-able, + // there is not much of a penalty to the indirect + // call. however, this is best expressed as a jump-table... + defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ + StrType: rwString, + BinType: rwBytes, + MapType: rwMap, + ArrayType: rwArray, + Float64Type: rwFloat64, + Float32Type: rwFloat32, + BoolType: rwBool, + IntType: rwInt, + UintType: rwUint, + NilType: rwNil, + ExtensionType: rwExtension, + Complex64Type: rwExtension, + Complex128Type: rwExtension, + TimeType: rwTime, + } +} + +// this is the interface +// used to write json +type jsWriter interface { + io.Writer + io.ByteWriter + WriteString(string) (int, error) +} + +// CopyToJSON reads MessagePack from 'src' and copies it +// as JSON to 'dst' until EOF. +func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { + r := NewReader(src) + n, err = r.WriteToJSON(dst) + freeR(r) + return +} + +// WriteToJSON translates MessagePack from 'r' and writes it as +// JSON to 'w' until the underlying reader returns io.EOF. It returns +// the number of bytes written, and an error if it stopped before EOF. 
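As a usage sketch (not part of the vendored file), the CopyToJSON entry point defined above can be exercised like this; the AppendMapHeader, AppendString, and AppendInt helpers are assumed to come from this package's write path, which lies outside this hunk.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode {"name":"gopher","port":8080} as MessagePack.
	// AppendMapHeader/AppendString/AppendInt are assumed from elsewhere in the package.
	in := msgp.AppendMapHeader(nil, 2)
	in = msgp.AppendString(in, "name")
	in = msgp.AppendString(in, "gopher")
	in = msgp.AppendString(in, "port")
	in = msgp.AppendInt(in, 8080)

	var out bytes.Buffer
	if _, err := msgp.CopyToJSON(&out, bytes.NewReader(in)); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(out.String()) // {"name":"gopher","port":8080}
}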
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { + var j jsWriter + var bf *bufio.Writer + if jsw, ok := w.(jsWriter); ok { + j = jsw + } else { + bf = bufio.NewWriter(w) + j = bf + } + var nn int + for err == nil { + nn, err = rwNext(j, r) + n += int64(nn) + } + if err != io.EOF { + if bf != nil { + bf.Flush() + } + return + } + err = nil + if bf != nil { + err = bf.Flush() + } + return +} + +func rwNext(w jsWriter, src *Reader) (int, error) { + t, err := src.NextType() + if err != nil { + return 0, err + } + return defuns[t](w, src) +} + +func rwMap(dst jsWriter, src *Reader) (n int, err error) { + var comma bool + var sz uint32 + var field []byte + + sz, err = src.ReadMapHeader() + if err != nil { + return + } + + if sz == 0 { + return dst.WriteString("{}") + } + + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + var nn int + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + + field, err = src.ReadMapKeyPtr() + if err != nil { + return + } + nn, err = rwquoted(dst, field) + n += nn + if err != nil { + return + } + + err = dst.WriteByte(':') + if err != nil { + return + } + n++ + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + if !comma { + comma = true + } + } + + err = dst.WriteByte('}') + if err != nil { + return + } + n++ + return +} + +func rwArray(dst jsWriter, src *Reader) (n int, err error) { + err = dst.WriteByte('[') + if err != nil { + return + } + var sz uint32 + var nn int + sz, err = src.ReadArrayHeader() + if err != nil { + return + } + comma := false + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + comma = true + } + + err = dst.WriteByte(']') + if err != nil { + return + } + n++ + return +} + +func rwNil(dst jsWriter, src *Reader) (int, error) { + err := src.ReadNil() + if err != nil { + return 0, err + } + return dst.Write(null) +} + +func rwFloat32(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat32() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32) + return dst.Write(src.scratch) +} + +func rwFloat64(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64) + return dst.Write(src.scratch) +} + +func rwInt(dst jsWriter, src *Reader) (int, error) { + i, err := src.ReadInt64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) + return dst.Write(src.scratch) +} + +func rwUint(dst jsWriter, src *Reader) (int, error) { + u, err := src.ReadUint64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) + return dst.Write(src.scratch) +} + +func rwBool(dst jsWriter, src *Reader) (int, error) { + b, err := src.ReadBool() + if err != nil { + return 0, err + } + if b { + return dst.WriteString("true") + } + return dst.WriteString("false") +} + +func rwTime(dst jsWriter, src *Reader) (int, error) { + t, err := src.ReadTime() + if err != nil { + return 0, err + } + bts, err := t.MarshalJSON() + if err != nil { + return 0, err + } + return dst.Write(bts) +} + +func rwExtension(dst jsWriter, src *Reader) (n int, err error) { + et, err := src.peekExtensionType() + if err != nil { + return 0, err + } + + // registered extensions can 
override + // the JSON encoding + if j, ok := extensionReg[et]; ok { + var bts []byte + e := j() + err = src.ReadExtension(e) + if err != nil { + return + } + bts, err = json.Marshal(e) + if err != nil { + return + } + return dst.Write(bts) + } + + e := RawExtension{} + e.Type = et + err = src.ReadExtension(&e) + if err != nil { + return + } + + var nn int + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + + nn, err = dst.WriteString(`"type:"`) + n += nn + if err != nil { + return + } + + src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) + nn, err = dst.Write(src.scratch) + n += nn + if err != nil { + return + } + + nn, err = dst.WriteString(`,"data":"`) + n += nn + if err != nil { + return + } + + enc := base64.NewEncoder(base64.StdEncoding, dst) + + nn, err = enc.Write(e.Data) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + nn, err = dst.WriteString(`"}`) + n += nn + return +} + +func rwString(dst jsWriter, src *Reader) (n int, err error) { + var p []byte + p, err = src.R.Peek(1) + if err != nil { + return + } + lead := p[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + src.R.Skip(1) + goto write + } + + switch lead { + case mstr8: + p, err = src.R.Next(2) + if err != nil { + return + } + read = int(uint8(p[1])) + case mstr16: + p, err = src.R.Next(3) + if err != nil { + return + } + read = int(big.Uint16(p[1:])) + case mstr32: + p, err = src.R.Next(5) + if err != nil { + return + } + read = int(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +write: + p, err = src.R.Next(read) + if err != nil { + return + } + n, err = rwquoted(dst, p) + return +} + +func rwBytes(dst jsWriter, src *Reader) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + src.scratch, err = src.ReadBytes(src.scratch[:0]) + if err != nil { + return + } + enc := base64.NewEncoder(base64.StdEncoding, dst) + nn, err = enc.Write(src.scratch) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} + +// Below (c) The Go Authors, 2009-2014 +// Subject to the BSD-style license found at http://golang.org +// +// see: encoding/json/encode.go:(*encodeState).stringbytes() +func rwquoted(dst jsWriter, s []byte) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + switch b { + case '\\', '"': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte(b) + if err != nil { + return + } + n++ + case '\n': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('n') + if err != nil { + return + } + n++ + case '\r': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('r') + if err != nil { + return + } + n++ + case '\t': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('t') + if err != nil { + return + } + n++ + default: + // This encodes bytes < 0x20 except for \t, \n and \r. 
+ // It also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + nn, err = dst.WriteString(`\u00`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[b>>4]) + if err != nil { + return + } + n++ + err = dst.WriteByte(hex[b&0xF]) + if err != nil { + return + } + n++ + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\ufffd`) + n += nn + if err != nil { + return + } + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\u202`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[c&0xF]) + if err != nil { + return + } + n++ + i += size + start = i + continue + } + i += size + } + if start < len(s) { + nn, err = dst.Write(s[start:]) + n += nn + if err != nil { + return + } + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go new file mode 100644 index 0000000..438caf5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go @@ -0,0 +1,363 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "time" +) + +var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) + +func init() { + + // NOTE(pmh): this is best expressed as a jump table, + // but gc doesn't do that yet. revisit post-go1.5. + unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ + StrType: rwStringBytes, + BinType: rwBytesBytes, + MapType: rwMapBytes, + ArrayType: rwArrayBytes, + Float64Type: rwFloat64Bytes, + Float32Type: rwFloat32Bytes, + BoolType: rwBoolBytes, + IntType: rwIntBytes, + UintType: rwUintBytes, + NilType: rwNullBytes, + ExtensionType: rwExtensionBytes, + Complex64Type: rwExtensionBytes, + Complex128Type: rwExtensionBytes, + TimeType: rwTimeBytes, + } +} + +// UnmarshalAsJSON takes raw messagepack and writes +// it as JSON to 'w'. If an error is returned, the +// bytes not translated will also be returned. If +// no errors are encountered, the length of the returned +// slice will be zero. 
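A minimal sketch of the byte-slice counterpart described in the comment above: UnmarshalAsJSON translates raw MessagePack bytes directly and returns whatever input it could not translate. AppendString is assumed from the package's write path, outside this hunk.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	msg := msgp.AppendString(nil, "hello") // one MessagePack string object

	var out bytes.Buffer
	left, err := msgp.UnmarshalAsJSON(&out, msg)
	// On success everything was translated, so no bytes are left over.
	fmt.Println(out.String(), len(left), err) // "hello" 0 <nil>
}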
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { + var ( + scratch []byte + cast bool + dst jsWriter + err error + ) + if jsw, ok := w.(jsWriter); ok { + dst = jsw + cast = true + } else { + dst = bufio.NewWriterSize(w, 512) + } + for len(msg) > 0 && err == nil { + msg, scratch, err = writeNext(dst, msg, scratch) + } + if !cast && err == nil { + err = dst.(*bufio.Writer).Flush() + } + return msg, err +} + +func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + if len(msg) < 1 { + return msg, scratch, ErrShortBytes + } + t := getType(msg[0]) + if t == InvalidType { + return msg, scratch, InvalidPrefixError(msg[0]) + } + if t == ExtensionType { + et, err := peekExtension(msg) + if err != nil { + return nil, scratch, err + } + if et == TimeExtension { + t = TimeType + } + } + return unfuns[t](w, msg, scratch) +} + +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadArrayHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('[') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte(']') + return msg, scratch, err +} + +func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadMapHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('{') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = rwMapKeyBytes(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte(':') + if err != nil { + return msg, scratch, err + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte('}') + return msg, scratch, err +} + +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return rwBytesBytes(w, msg, scratch) + } + } + return msg, scratch, err +} + +func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + str, msg, err := ReadStringZC(msg) + if err != nil { + return msg, scratch, err + } + _, err = rwquoted(w, str) + return msg, scratch, err +} + +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + bts, msg, err := ReadBytesZC(msg) + if err != nil { + return msg, scratch, err + } + l := base64.StdEncoding.EncodedLen(len(bts)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, bts) + err = w.WriteByte('"') + if err != nil { + return msg, scratch, err + } + _, err = w.Write(scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('"') + return msg, scratch, err +} + +func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, err := ReadNilBytes(msg) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(null) + return msg, scratch, err +} + +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + b, msg, err := 
ReadBoolBytes(msg) + if err != nil { + return msg, scratch, err + } + if b { + _, err = w.WriteString("true") + return msg, scratch, err + } + _, err = w.WriteString("false") + return msg, scratch, err +} + +func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + i, msg, err := ReadInt64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], i, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + u, msg, err := ReadUint64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendUint(scratch[0:0], u, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + var sz int + if f64 { + sz = 64 + f, msg, err = ReadFloat64Bytes(msg) + } else { + sz = 32 + var v float32 + v, msg, err = ReadFloat32Bytes(msg) + f = float64(v) + } + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float32 + var err error + f, msg, err = ReadFloat32Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + f, msg, err = ReadFloat64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var t time.Time + var err error + t, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := t.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err +} + +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var err error + var et int8 + et, err = peekExtension(msg) + if err != nil { + return msg, scratch, err + } + + // if it's time.Time + if et == TimeExtension { + var tm time.Time + tm, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := tm.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // if the extension is registered, + // use its canonical JSON form + if f, ok := extensionReg[et]; ok { + e := f() + msg, err = ReadExtensionBytes(msg, e) + if err != nil { + return msg, scratch, err + } + bts, err := json.Marshal(e) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // otherwise, write `{"type": , "data": ""}` + r := RawExtension{} + r.Type = et + msg, err = ReadExtensionBytes(msg, &r) + if err != nil { + return msg, scratch, err + } + scratch, err = writeExt(w, r, scratch) + return msg, scratch, err +} + +func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { + _, err := w.WriteString(`{"type":`) + if err != nil { + return scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) + _, err = 
w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`,"data":"`) + if err != nil { + return scratch, err + } + l := base64.StdEncoding.EncodedLen(len(r.Data)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, r.Data) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`"}`) + return scratch, err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go new file mode 100644 index 0000000..ad07ef9 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/number.go @@ -0,0 +1,267 @@ +package msgp + +import ( + "math" + "strconv" +) + +// The portable parts of the Number implementation + +// Number can be +// an int64, uint64, float32, +// or float64 internally. +// It can decode itself +// from any of the native +// messagepack number types. +// The zero-value of Number +// is Int(0). Using the equality +// operator with Number compares +// both the type and the value +// of the number. +type Number struct { + // internally, this + // is just a tagged union. + // the raw bits of the number + // are stored the same way regardless. + bits uint64 + typ Type +} + +// AsInt sets the number to an int64. +func (n *Number) AsInt(i int64) { + + // we always store int(0) + // as {0, InvalidType} in + // order to preserve + // the behavior of the == operator + if i == 0 { + n.typ = InvalidType + n.bits = 0 + return + } + + n.typ = IntType + n.bits = uint64(i) +} + +// AsUint sets the number to a uint64. +func (n *Number) AsUint(u uint64) { + n.typ = UintType + n.bits = u +} + +// AsFloat32 sets the value of the number +// to a float32. +func (n *Number) AsFloat32(f float32) { + n.typ = Float32Type + n.bits = uint64(math.Float32bits(f)) +} + +// AsFloat64 sets the value of the +// number to a float64. +func (n *Number) AsFloat64(f float64) { + n.typ = Float64Type + n.bits = math.Float64bits(f) +} + +// Int casts the number as an int64, and +// returns whether or not that was the +// underlying type. +func (n *Number) Int() (int64, bool) { + return int64(n.bits), n.typ == IntType || n.typ == InvalidType +} + +// Uint casts the number as a uint64, and returns +// whether or not that was the underlying type. +func (n *Number) Uint() (uint64, bool) { + return n.bits, n.typ == UintType +} + +// Float casts the number to a float64, and +// returns whether or not that was the underlying +// type (either a float64 or a float32). +func (n *Number) Float() (float64, bool) { + switch n.typ { + case Float32Type: + return float64(math.Float32frombits(uint32(n.bits))), true + case Float64Type: + return math.Float64frombits(n.bits), true + default: + return 0.0, false + } +} + +// Type will return one of: +// Float64Type, Float32Type, UintType, or IntType. 
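An illustrative round-trip for the Number type defined above, using only its own methods from this hunk; it shows how the zero value and AsInt interact with Int and Type.

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var n msgp.Number
	n.AsInt(-7)

	buf, _ := n.MarshalMsg(nil) // encodes as a MessagePack int

	var back msgp.Number
	if _, err := back.UnmarshalMsg(buf); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	i, ok := back.Int()
	fmt.Println(i, ok, back.Type()) // -7 true int
}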
+func (n *Number) Type() Type { + if n.typ == InvalidType { + return IntType + } + return n.typ +} + +// DecodeMsg implements msgp.Decodable +func (n *Number) DecodeMsg(r *Reader) error { + typ, err := r.NextType() + if err != nil { + return err + } + switch typ { + case Float32Type: + f, err := r.ReadFloat32() + if err != nil { + return err + } + n.AsFloat32(f) + return nil + case Float64Type: + f, err := r.ReadFloat64() + if err != nil { + return err + } + n.AsFloat64(f) + return nil + case IntType: + i, err := r.ReadInt64() + if err != nil { + return err + } + n.AsInt(i) + return nil + case UintType: + u, err := r.ReadUint64() + if err != nil { + return err + } + n.AsUint(u) + return nil + default: + return TypeError{Encoded: typ, Method: IntType} + } +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { + typ := NextType(b) + switch typ { + case IntType: + i, o, err := ReadInt64Bytes(b) + if err != nil { + return b, err + } + n.AsInt(i) + return o, nil + case UintType: + u, o, err := ReadUint64Bytes(b) + if err != nil { + return b, err + } + n.AsUint(u) + return o, nil + case Float64Type: + f, o, err := ReadFloat64Bytes(b) + if err != nil { + return b, err + } + n.AsFloat64(f) + return o, nil + case Float32Type: + f, o, err := ReadFloat32Bytes(b) + if err != nil { + return b, err + } + n.AsFloat32(f) + return o, nil + default: + return b, TypeError{Method: IntType, Encoded: typ} + } +} + +// MarshalMsg implements msgp.Marshaler +func (n *Number) MarshalMsg(b []byte) ([]byte, error) { + switch n.typ { + case IntType: + return AppendInt64(b, int64(n.bits)), nil + case UintType: + return AppendUint64(b, uint64(n.bits)), nil + case Float64Type: + return AppendFloat64(b, math.Float64frombits(n.bits)), nil + case Float32Type: + return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil + default: + return AppendInt64(b, 0), nil + } +} + +// EncodeMsg implements msgp.Encodable +func (n *Number) EncodeMsg(w *Writer) error { + switch n.typ { + case IntType: + return w.WriteInt64(int64(n.bits)) + case UintType: + return w.WriteUint64(n.bits) + case Float64Type: + return w.WriteFloat64(math.Float64frombits(n.bits)) + case Float32Type: + return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) + default: + return w.WriteInt64(0) + } +} + +// Msgsize implements msgp.Sizer +func (n *Number) Msgsize() int { + switch n.typ { + case Float32Type: + return Float32Size + case Float64Type: + return Float64Size + case IntType: + return Int64Size + case UintType: + return Uint64Size + default: + return 1 // fixint(0) + } +} + +// MarshalJSON implements json.Marshaler +func (n *Number) MarshalJSON() ([]byte, error) { + t := n.Type() + if t == InvalidType { + return []byte{'0'}, nil + } + out := make([]byte, 0, 32) + switch t { + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.AppendFloat(out, f, 'f', -1, 64), nil + case IntType: + i, _ := n.Int() + return strconv.AppendInt(out, i, 10), nil + case UintType: + u, _ := n.Uint() + return strconv.AppendUint(out, u, 10), nil + default: + panic("(*Number).typ is invalid") + } +} + +// String implements fmt.Stringer +func (n *Number) String() string { + switch n.typ { + case InvalidType: + return "0" + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.FormatFloat(f, 'f', -1, 64) + case IntType: + i, _ := n.Int() + return strconv.FormatInt(i, 10) + case UintType: + u, _ := n.Uint() + return strconv.FormatUint(u, 10) + default: + panic("(*Number).typ is invalid") + } 
+} diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go new file mode 100644 index 0000000..c828f7e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/purego.go @@ -0,0 +1,15 @@ +// +build purego appengine + +package msgp + +// let's just assume appengine +// uses 64-bit hardware... +const smallint = false + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go new file mode 100644 index 0000000..fe2de9e --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -0,0 +1,1363 @@ +package msgp + +import ( + "io" + "math" + "sync" + "time" + + "github.com/philhofer/fwd" +) + +// where we keep old *Readers +var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} + +// Type is a MessagePack wire type, +// including this package's built-in +// extension types. +type Type byte + +// MessagePack Types +// +// The zero value of Type +// is InvalidType. +const ( + InvalidType Type = iota + + // MessagePack built-in types + + StrType + BinType + MapType + ArrayType + Float64Type + Float32Type + BoolType + IntType + UintType + NilType + ExtensionType + + // pseudo-types provided + // by extensions + + Complex64Type + Complex128Type + TimeType + + _maxtype +) + +// String implements fmt.Stringer +func (t Type) String() string { + switch t { + case StrType: + return "str" + case BinType: + return "bin" + case MapType: + return "map" + case ArrayType: + return "array" + case Float64Type: + return "float64" + case Float32Type: + return "float32" + case BoolType: + return "bool" + case UintType: + return "uint" + case IntType: + return "int" + case ExtensionType: + return "ext" + case NilType: + return "nil" + default: + return "" + } +} + +func freeR(m *Reader) { + readerPool.Put(m) +} + +// Unmarshaler is the interface fulfilled +// by objects that know how to unmarshal +// themselves from MessagePack. +// UnmarshalMsg unmarshals the object +// from binary, returing any leftover +// bytes and any errors encountered. +type Unmarshaler interface { + UnmarshalMsg([]byte) ([]byte, error) +} + +// Decodable is the interface fulfilled +// by objects that know how to read +// themselves from a *Reader. +type Decodable interface { + DecodeMsg(*Reader) error +} + +// Decode decodes 'd' from 'r'. +func Decode(r io.Reader, d Decodable) error { + rd := NewReader(r) + err := d.DecodeMsg(rd) + freeR(rd) + return err +} + +// NewReader returns a *Reader that +// reads from the provided reader. The +// reader will be buffered. +func NewReader(r io.Reader) *Reader { + p := readerPool.Get().(*Reader) + if p.R == nil { + p.R = fwd.NewReader(r) + } else { + p.R.Reset(r) + } + return p +} + +// NewReaderSize returns a *Reader with a buffer of the given size. +// (This is vastly preferable to passing the decoder a reader that is already buffered.) +func NewReaderSize(r io.Reader, sz int) *Reader { + return &Reader{R: fwd.NewReaderSize(r, sz)} +} + +// NewReaderBuf returns a *Reader with a provided buffer. +func NewReaderBuf(r io.Reader, buf []byte) *Reader { + return &Reader{R: fwd.NewReaderBuf(r, buf)} +} + +// Reader wraps an io.Reader and provides +// methods to read MessagePack-encoded values +// from it. Readers are buffered. +type Reader struct { + // R is the buffered reader + // that the Reader uses + // to decode MessagePack. 
+ // The Reader itself + // is stateless; all the + // buffering is done + // within R. + R *fwd.Reader + scratch []byte +} + +// Read implements `io.Reader` +func (m *Reader) Read(p []byte) (int, error) { + return m.R.Read(p) +} + +// CopyNext reads the next object from m without decoding it and writes it to w. +// It avoids unnecessary copies internally. +func (m *Reader) CopyNext(w io.Writer) (int64, error) { + sz, o, err := getNextSize(m.R) + if err != nil { + return 0, err + } + + var n int64 + // Opportunistic optimization: if we can fit the whole thing in the m.R + // buffer, then just get a pointer to that, and pass it to w.Write, + // avoiding an allocation. + if int(sz) <= m.R.BufferSize() { + var nn int + var buf []byte + buf, err = m.R.Next(int(sz)) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + return 0, err + } + nn, err = w.Write(buf) + n += int64(nn) + } else { + // Fall back to io.CopyN. + // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) + n, err = io.CopyN(w, m.R, int64(sz)) + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + } + if err != nil { + return n, err + } else if n < int64(sz) { + return n, io.ErrShortWrite + } + + // for maps and slices, read elements + for x := uintptr(0); x < o; x++ { + var n2 int64 + n2, err = m.CopyNext(w) + if err != nil { + return n, err + } + n += n2 + } + return n, nil +} + +// ReadFull implements `io.ReadFull` +func (m *Reader) ReadFull(p []byte) (int, error) { + return m.R.ReadFull(p) +} + +// Reset resets the underlying reader. +func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } + +// Buffered returns the number of bytes currently in the read buffer. +func (m *Reader) Buffered() int { return m.R.Buffered() } + +// BufferSize returns the capacity of the read buffer. +func (m *Reader) BufferSize() int { return m.R.BufferSize() } + +// NextType returns the next object type to be decoded. +func (m *Reader) NextType() (Type, error) { + p, err := m.R.Peek(1) + if err != nil { + return InvalidType, err + } + t := getType(p[0]) + if t == InvalidType { + return t, InvalidPrefixError(p[0]) + } + if t == ExtensionType { + v, err := m.peekExtensionType() + if err != nil { + return InvalidType, err + } + switch v { + case Complex64Extension: + return Complex64Type, nil + case Complex128Extension: + return Complex128Type, nil + case TimeExtension: + return TimeType, nil + } + } + return t, nil +} + +// IsNil returns whether or not +// the next byte is a null messagepack byte +func (m *Reader) IsNil() bool { + p, err := m.R.Peek(1) + return err == nil && p[0] == mnil +} + +// getNextSize returns the size of the next object on the wire. +// returns (obj size, obj elements, error) +// only maps and arrays have non-zero obj elements +// for maps and arrays, obj size does not include elements +// +// use uintptr b/c it's guaranteed to be large enough +// to hold whatever we can fit in memory. 
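A small sketch of CopyNext defined above: it relays exactly one object (including all elements of a map or array) from a Reader to a Writer without decoding it, leaving the Reader positioned at the next object. AppendString is assumed from the package's write path.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Two consecutive objects on the wire.
	src := msgp.AppendString(nil, "first")
	src = msgp.AppendString(src, "second")

	r := msgp.NewReader(bytes.NewReader(src))

	var one bytes.Buffer
	n, err := r.CopyNext(&one) // copies only "first", undecoded
	fmt.Println(n, err)        // 6 <nil>

	s, _ := r.ReadString() // the reader is now positioned at "second"
	fmt.Println(s)
}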
+func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { + b, err := r.Peek(1) + if err != nil { + return 0, 0, err + } + lead := b[0] + spec := &sizes[lead] + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { + return uintptr(size), uintptr(mode), nil + } + b, err = r.Peek(int(size)) + if err != nil { + return 0, 0, err + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// Skip skips over the next object, regardless of +// its type. If it is an array or map, the whole array +// or map will be skipped. +func (m *Reader) Skip() error { + var ( + v uintptr // bytes + o uintptr // objects + err error + p []byte + ) + + // we can use the faster + // method if we have enough + // buffered data + if m.R.Buffered() >= 5 { + p, err = m.R.Peek(5) + if err != nil { + return err + } + v, o, err = getSize(p) + if err != nil { + return err + } + } else { + v, o, err = getNextSize(m.R) + if err != nil { + return err + } + } + + // 'v' is always non-zero + // if err == nil + _, err = m.R.Skip(int(v)) + if err != nil { + return err + } + + // for maps and slices, skip elements + for x := uintptr(0); x < o; x++ { + err = m.Skip() + if err != nil { + return err + } + } + return nil +} + +// ReadMapHeader reads the next object +// as a map header and returns the size +// of the map and the number of bytes written. +// It will return a TypeError{} if the next +// object is not a map. +func (m *Reader) ReadMapHeader() (sz uint32, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mmap16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mmap32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKey reads either a 'str' or 'bin' field from +// the reader and returns the value as a []byte. It uses +// scratch for storage if it is large enough. +func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { + out, err := m.ReadStringAsBytes(scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return m.ReadBytes(scratch) + } + return nil, err + } + return out, nil +} + +// MapKeyPtr returns a []byte pointing to the contents +// of a valid map key. The key cannot be empty, and it +// must be shorter than the total buffer size of the +// *Reader. Additionally, the returned slice is only +// valid until the next *Reader method call. Users +// should exercise extreme care when using this +// method; writing into the returned slice may +// corrupt future reads. 
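A selective-decoding sketch built on ReadMapHeader, ReadMapKey, and Skip from this file (ReadInt appears further down in the same file); the Append helpers used to build the test input are assumed from the package's write path.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// {"host":"db1","port":5432}; only "port" is of interest.
	in := msgp.AppendMapHeader(nil, 2)
	in = msgp.AppendString(in, "host")
	in = msgp.AppendString(in, "db1")
	in = msgp.AppendString(in, "port")
	in = msgp.AppendInt(in, 5432)

	r := msgp.NewReader(bytes.NewReader(in))
	sz, err := r.ReadMapHeader()
	if err != nil {
		panic(err)
	}
	port := 0
	for i := uint32(0); i < sz; i++ {
		key, err := r.ReadMapKey(nil)
		if err != nil {
			panic(err)
		}
		if string(key) == "port" {
			port, err = r.ReadInt()
		} else {
			err = r.Skip() // discard values we do not care about
		}
		if err != nil {
			panic(err)
		}
	}
	fmt.Println(port) // 5432
}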
+func (m *Reader) ReadMapKeyPtr() ([]byte, error) { + p, err := m.R.Peek(1) + if err != nil { + return nil, err + } + lead := p[0] + var read int + if isfixstr(lead) { + read = int(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + switch lead { + case mstr8, mbin8: + p, err = m.R.Next(2) + if err != nil { + return nil, err + } + read = int(p[1]) + case mstr16, mbin16: + p, err = m.R.Next(3) + if err != nil { + return nil, err + } + read = int(big.Uint16(p[1:])) + case mstr32, mbin32: + p, err = m.R.Next(5) + if err != nil { + return nil, err + } + read = int(big.Uint32(p[1:])) + default: + return nil, badPrefix(StrType, lead) + } +fill: + if read == 0 { + return nil, ErrShortBytes + } + return m.R.Next(read) +} + +// ReadArrayHeader reads the next object as an +// array header and returns the size of the array +// and the number of bytes read. +func (m *Reader) ReadArrayHeader() (sz uint32, err error) { + var lead byte + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case marray16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + + case marray32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNil reads a 'nil' MessagePack byte from the reader +func (m *Reader) ReadNil() error { + p, err := m.R.Peek(1) + if err != nil { + return err + } + if p[0] != mnil { + return badPrefix(NilType, p[0]) + } + _, err = m.R.Skip(1) + return err +} + +// ReadFloat64 reads a float64 from the reader. +// (If the value on the wire is encoded as a float32, +// it will be up-cast to a float64.) 
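A short sketch of the up-cast described just above: a float32 written to the wire can still be read with ReadFloat64. AppendFloat32 and AppendFloat64 are referenced in this hunk but defined in the package's write path.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	buf := msgp.AppendFloat32(nil, 1.5) // 5-byte float32 encoding
	buf = msgp.AppendFloat64(buf, 2.5)  // a following float64

	r := msgp.NewReader(bytes.NewReader(buf))
	a, _ := r.ReadFloat64() // 1.5, up-cast from the float32 encoding
	b, _ := r.ReadFloat64() // 2.5, read directly
	fmt.Println(a, b)       // 1.5 2.5
}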
+func (m *Reader) ReadFloat64() (f float64, err error) { + var p []byte + p, err = m.R.Peek(9) + if err != nil { + // we'll allow a coversion from float32 to float64, + // since we don't lose any precision + if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + return + } + if p[0] != mfloat64 { + // see above + if p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + err = badPrefix(Float64Type, p[0]) + return + } + f = math.Float64frombits(getMuint64(p)) + _, err = m.R.Skip(9) + return +} + +// ReadFloat32 reads a float32 from the reader +func (m *Reader) ReadFloat32() (f float32, err error) { + var p []byte + p, err = m.R.Peek(5) + if err != nil { + return + } + if p[0] != mfloat32 { + err = badPrefix(Float32Type, p[0]) + return + } + f = math.Float32frombits(getMuint32(p)) + _, err = m.R.Skip(5) + return +} + +// ReadBool reads a bool from the reader +func (m *Reader) ReadBool() (b bool, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mtrue: + b = true + case mfalse: + default: + err = badPrefix(BoolType, p[0]) + return + } + _, err = m.R.Skip(1) + return +} + +// ReadInt64 reads an int64 from the reader +func (m *Reader) ReadInt64() (i int64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixint(lead) { + i = int64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } else if isnfixint(lead) { + i = int64(rnfixint(lead)) + _, err = m.R.Skip(1) + return + } + + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMint8(p)) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMint16(p)) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMint32(p)) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + i = getMint64(p) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u := getMuint64(p) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32 reads an int32 from the reader +func (m *Reader) ReadInt32() (i int32, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt32 || in < math.MinInt32 { + err = IntOverflow{Value: in, FailedBitsize: 32} + return + } + i = int32(in) + return +} + +// ReadInt16 reads an int16 from the reader +func (m *Reader) ReadInt16() (i int16, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt16 || in < math.MinInt16 { + err = IntOverflow{Value: in, FailedBitsize: 16} + return + } + i = int16(in) + return +} + +// ReadInt8 reads an int8 from the reader +func (m *Reader) ReadInt8() (i int8, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt8 || in < math.MinInt8 { + err = IntOverflow{Value: in, FailedBitsize: 8} + return + } + i = int8(in) + return +} + +// ReadInt reads an int from the reader +func (m *Reader) ReadInt() (i int, err error) { + if 
smallint { + var in int32 + in, err = m.ReadInt32() + i = int(in) + return + } + var in int64 + in, err = m.ReadInt64() + i = int(in) + return +} + +// ReadUint64 reads a uint64 from the reader +func (m *Reader) ReadUint64() (u uint64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + v := int64(getMint8(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + u = uint64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + v := int64(getMint16(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + u = uint64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + v := int64(getMint32(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + u = uint64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + v := int64(getMint64(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u = getMuint64(p) + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + + } +} + +// ReadUint32 reads a uint32 from the reader +func (m *Reader) ReadUint32() (u uint32, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint32 { + err = UintOverflow{Value: in, FailedBitsize: 32} + return + } + u = uint32(in) + return +} + +// ReadUint16 reads a uint16 from the reader +func (m *Reader) ReadUint16() (u uint16, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint16 { + err = UintOverflow{Value: in, FailedBitsize: 16} + return + } + u = uint16(in) + return +} + +// ReadUint8 reads a uint8 from the reader +func (m *Reader) ReadUint8() (u uint8, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + u = uint8(in) + return +} + +// ReadUint reads a uint from the reader +func (m *Reader) ReadUint() (u uint, err error) { + if smallint { + var un uint32 + un, err = m.ReadUint32() + u = uint(un) + return + } + var un uint64 + un, err = m.ReadUint64() + u = uint(un) + return +} + +// ReadByte is analogous to ReadUint8. +// +// NOTE: this is *not* an implementation +// of io.ByteReader. +func (m *Reader) ReadByte() (b byte, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + b = byte(in) + return +} + +// ReadBytes reads a MessagePack 'bin' object +// from the reader and returns its value. It may +// use 'scratch' for storage if it is non-nil. 
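A scratch-reuse sketch for the ReadBytes method described above: when the supplied slice has enough capacity it is reused, so repeated reads can avoid allocations. AppendBytes is assumed from the package's write path.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	in := msgp.AppendBytes(nil, []byte("chunk-1"))
	in = msgp.AppendBytes(in, []byte("chunk-2"))

	r := msgp.NewReader(bytes.NewReader(in))
	scratch := make([]byte, 0, 64) // reused for both reads
	for i := 0; i < 2; i++ {
		b, err := r.ReadBytes(scratch)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b)) // chunk-1, then chunk-2
		scratch = b[:0]
	}
}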
+func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead = p[0] + var read int64 + switch lead { + case mbin8: + read = int64(p[1]) + m.R.Skip(2) + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(BinType, lead) + return + } + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadBytesHeader reads the size header +// of a MessagePack 'bin' object. The user +// is responsible for dealing with the next +// 'sz' bytes from the reader in an application-specific +// way. +func (m *Reader) ReadBytesHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mbin8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = uint32(big.Uint32(p[1:])) + return + default: + err = badPrefix(BinType, p[0]) + return + } +} + +// ReadExactBytes reads a MessagePack 'bin'-encoded +// object off of the wire into the provided slice. An +// ArrayError will be returned if the object is not +// exactly the length of the input slice. +func (m *Reader) ReadExactBytes(into []byte) error { + p, err := m.R.Peek(2) + if err != nil { + return err + } + lead := p[0] + var read int64 // bytes to read + var skip int // prefix size to skip + switch lead { + case mbin8: + read = int64(p[1]) + skip = 2 + case mbin16: + p, err = m.R.Peek(3) + if err != nil { + return err + } + read = int64(big.Uint16(p[1:])) + skip = 3 + case mbin32: + p, err = m.R.Peek(5) + if err != nil { + return err + } + read = int64(big.Uint32(p[1:])) + skip = 5 + default: + return badPrefix(BinType, lead) + } + if read != int64(len(into)) { + return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} + } + m.R.Skip(skip) + _, err = m.R.ReadFull(into) + return err +} + +// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string +// and returns its value as bytes. It may use 'scratch' for storage +// if it is non-nil. +func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + var read int64 + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadStringHeader reads a string header +// off of the wire. The user is then responsible +// for dealing with the next 'sz' bytes from +// the reader in an application-specific manner. 
+func (m *Reader) ReadStringHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead := p[0] + if isfixstr(lead) { + sz = uint32(rfixstr(lead)) + m.R.Skip(1) + return + } + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(StrType, lead) + return + } +} + +// ReadString reads a utf-8 string from the reader +func (m *Reader) ReadString() (s string, err error) { + var p []byte + var lead byte + var read int64 + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if read == 0 { + s, err = "", nil + return + } + // reading into the memory + // that will become the string + // itself has vastly superior + // worst-case performance, because + // the reader buffer doesn't have + // to be large enough to hold the string. + // the idea here is to make it more + // difficult for someone malicious + // to cause the system to run out of + // memory by sending very large strings. + // + // NOTE: this works because the argument + // passed to (*fwd.Reader).ReadFull escapes + // to the heap; its argument may, in turn, + // be passed to the underlying reader, and + // thus escape analysis *must* conclude that + // 'out' escapes. + out := make([]byte, read) + _, err = m.R.ReadFull(out) + if err != nil { + return + } + s = UnsafeString(out) + return +} + +// ReadComplex64 reads a complex64 from the reader +func (m *Reader) ReadComplex64() (f complex64, err error) { + var p []byte + p, err = m.R.Peek(10) + if err != nil { + return + } + if p[0] != mfixext8 { + err = badPrefix(Complex64Type, p[0]) + return + } + if int8(p[1]) != Complex64Extension { + err = errExt(int8(p[1]), Complex64Extension) + return + } + f = complex(math.Float32frombits(big.Uint32(p[2:])), + math.Float32frombits(big.Uint32(p[6:]))) + _, err = m.R.Skip(10) + return +} + +// ReadComplex128 reads a complex128 from the reader +func (m *Reader) ReadComplex128() (f complex128, err error) { + var p []byte + p, err = m.R.Peek(18) + if err != nil { + return + } + if p[0] != mfixext16 { + err = badPrefix(Complex128Type, p[0]) + return + } + if int8(p[1]) != Complex128Extension { + err = errExt(int8(p[1]), Complex128Extension) + return + } + f = complex(math.Float64frombits(big.Uint64(p[2:])), + math.Float64frombits(big.Uint64(p[10:]))) + _, err = m.R.Skip(18) + return +} + +// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. +// (You must pass a non-nil map into the function.) 
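A generic-decoding sketch for the ReadMapStrIntf method described above: it fills a caller-supplied map[string]interface{}, clearing it first. The Append helpers that build the input are assumed from the package's write path.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	in := msgp.AppendMapHeader(nil, 2)
	in = msgp.AppendString(in, "id")
	in = msgp.AppendInt(in, 42)
	in = msgp.AppendString(in, "ok")
	in = msgp.AppendBool(in, true)

	mp := make(map[string]interface{})
	r := msgp.NewReader(bytes.NewReader(in))
	if err := r.ReadMapStrIntf(mp); err != nil {
		panic(err)
	}
	fmt.Println(mp["id"], mp["ok"]) // 42 true
}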
+func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { + var sz uint32 + sz, err = m.ReadMapHeader() + if err != nil { + return + } + for key := range mp { + delete(mp, key) + } + for i := uint32(0); i < sz; i++ { + var key string + var val interface{} + key, err = m.ReadString() + if err != nil { + return + } + val, err = m.ReadIntf() + if err != nil { + return + } + mp[key] = val + } + return +} + +// ReadTime reads a time.Time object from the reader. +// The returned time's location will be set to time.Local. +func (m *Reader) ReadTime() (t time.Time, err error) { + var p []byte + p, err = m.R.Peek(15) + if err != nil { + return + } + if p[0] != mext8 || p[1] != 12 { + err = badPrefix(TimeType, p[0]) + return + } + if int8(p[2]) != TimeExtension { + err = errExt(int8(p[2]), TimeExtension) + return + } + sec, nsec := getUnix(p[3:]) + t = time.Unix(sec, int64(nsec)).Local() + _, err = m.R.Skip(15) + return +} + +// ReadIntf reads out the next object as a raw interface{}. +// Arrays are decoded as []interface{}, and maps are decoded +// as map[string]interface{}. Integers are decoded as int64 +// and unsigned integers are decoded as uint64. +func (m *Reader) ReadIntf() (i interface{}, err error) { + var t Type + t, err = m.NextType() + if err != nil { + return + } + switch t { + case BoolType: + i, err = m.ReadBool() + return + + case IntType: + i, err = m.ReadInt64() + return + + case UintType: + i, err = m.ReadUint64() + return + + case BinType: + i, err = m.ReadBytes(nil) + return + + case StrType: + i, err = m.ReadString() + return + + case Complex64Type: + i, err = m.ReadComplex64() + return + + case Complex128Type: + i, err = m.ReadComplex128() + return + + case TimeType: + i, err = m.ReadTime() + return + + case ExtensionType: + var t int8 + t, err = m.peekExtensionType() + if err != nil { + return + } + f, ok := extensionReg[t] + if ok { + e := f() + err = m.ReadExtension(e) + i = e + return + } + var e RawExtension + e.Type = t + err = m.ReadExtension(&e) + i = &e + return + + case MapType: + mp := make(map[string]interface{}) + err = m.ReadMapStrIntf(mp) + i = mp + return + + case NilType: + err = m.ReadNil() + i = nil + return + + case Float32Type: + i, err = m.ReadFloat32() + return + + case Float64Type: + i, err = m.ReadFloat64() + return + + case ArrayType: + var sz uint32 + sz, err = m.ReadArrayHeader() + + if err != nil { + return + } + out := make([]interface{}, int(sz)) + for j := range out { + out[j], err = m.ReadIntf() + if err != nil { + return + } + } + i = out + return + + default: + return nil, fatal // unreachable + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go new file mode 100644 index 0000000..e419975 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -0,0 +1,1197 @@ +package msgp + +import ( + "bytes" + "encoding/binary" + "math" + "time" +) + +var big = binary.BigEndian + +// NextType returns the type of the next +// object in the slice. If the length +// of the input is zero, it returns +// InvalidType. 
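NextType, described above, classifies raw bytes without consuming them; combined with the Raw type defined further down in this file it supports pass-through handling of objects you do not want to interpret. This is an illustrative sketch; AppendString and AppendInt are assumed from the package's write path.

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// One string object followed by an int object.
	msg := msgp.AppendString(nil, "opaque")
	msg = msgp.AppendInt(msg, 1)

	var raw msgp.Raw
	rest, err := raw.UnmarshalMsg(msg) // raw now holds just the string's bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(msgp.NextType(raw))  // str
	fmt.Println(msgp.NextType(rest)) // int
}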
+func NextType(b []byte) Type { + if len(b) == 0 { + return InvalidType + } + spec := sizes[b[0]] + t := spec.typ + if t == ExtensionType && len(b) > int(spec.size) { + var tp int8 + if spec.extra == constsize { + tp = int8(b[1]) + } else { + tp = int8(b[spec.size-1]) + } + switch tp { + case TimeExtension: + return TimeType + case Complex128Extension: + return Complex128Type + case Complex64Extension: + return Complex64Type + default: + return ExtensionType + } + } + return t +} + +// IsNil returns true if len(b)>0 and +// the leading byte is a 'nil' MessagePack +// byte; false otherwise +func IsNil(b []byte) bool { + if len(b) != 0 && b[0] == mnil { + return true + } + return false +} + +// Raw is raw MessagePack. +// Raw allows you to read and write +// data without interpreting its contents. +type Raw []byte + +// MarshalMsg implements msgp.Marshaler. +// It appends the raw contents of 'raw' +// to the provided byte slice. If 'raw' +// is 0 bytes, 'nil' will be appended instead. +func (r Raw) MarshalMsg(b []byte) ([]byte, error) { + i := len(r) + if i == 0 { + return AppendNil(b), nil + } + o, l := ensure(b, i) + copy(o[l:], []byte(r)) + return o, nil +} + +// UnmarshalMsg implements msgp.Unmarshaler. +// It sets the contents of *Raw to be the next +// object in the provided byte slice. +func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { + l := len(b) + out, err := Skip(b) + if err != nil { + return b, err + } + rlen := l - len(out) + if IsNil(b[:rlen]) { + rlen = 0 + } + if cap(*r) < rlen { + *r = make(Raw, rlen) + } else { + *r = (*r)[0:rlen] + } + copy(*r, b[:rlen]) + return out, nil +} + +// EncodeMsg implements msgp.Encodable. +// It writes the raw bytes to the writer. +// If r is empty, it writes 'nil' instead. +func (r Raw) EncodeMsg(w *Writer) error { + if len(r) == 0 { + return w.WriteNil() + } + _, err := w.Write([]byte(r)) + return err +} + +// DecodeMsg implements msgp.Decodable. +// It sets the value of *Raw to be the +// next object on the wire. +func (r *Raw) DecodeMsg(f *Reader) error { + *r = (*r)[:0] + err := appendNext(f, (*[]byte)(r)) + if IsNil(*r) { + *r = (*r)[:0] + } + return err +} + +// Msgsize implements msgp.Sizer +func (r Raw) Msgsize() int { + l := len(r) + if l == 0 { + return 1 // for 'nil' + } + return l +} + +func appendNext(f *Reader, d *[]byte) error { + amt, o, err := getNextSize(f.R) + if err != nil { + return err + } + var i int + *d, i = ensure(*d, int(amt)) + _, err = f.R.ReadFull((*d)[i:]) + if err != nil { + return err + } + for o > 0 { + err = appendNext(f, d) + if err != nil { + return err + } + o-- + } + return nil +} + +// MarshalJSON implements json.Marshaler +func (r *Raw) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + _, err := UnmarshalAsJSON(&buf, []byte(*r)) + return buf.Bytes(), err +} + +// ReadMapHeaderBytes reads a map header size +// from 'b' and returns the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a map) +func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + o = b[1:] + return + } + + switch lead { + case mmap16: + if l < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case mmap32: + if l < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKeyZC attempts to read a map key +// from 'b' and returns the key bytes and the remaining bytes +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a str or bin) +func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { + o, x, err := ReadStringZC(b) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return ReadBytesZC(b) + } + return nil, b, err + } + return o, x, nil +} + +// ReadArrayHeaderBytes attempts to read +// the array header size off of 'b' and return +// the size and remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not an array) +func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + lead := b[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + o = b[1:] + return + } + + switch lead { + case marray16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case marray32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNilBytes tries to read a "nil" byte +// off of 'b' and return the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'nil') +// - InvalidPrefixError +func ReadNilBytes(b []byte) ([]byte, error) { + if len(b) < 1 { + return nil, ErrShortBytes + } + if b[0] != mnil { + return b, badPrefix(NilType, b[0]) + } + return b[1:], nil +} + +// ReadFloat64Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float64) +func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { + if len(b) < 9 { + if len(b) >= 5 && b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = ErrShortBytes + return + } + + if b[0] != mfloat64 { + if b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = badPrefix(Float64Type, b[0]) + return + } + + f = math.Float64frombits(getMuint64(b)) + o = b[9:] + return +} + +// ReadFloat32Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float32) +func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { + if len(b) < 5 { + err = ErrShortBytes + return + } + + if b[0] != mfloat32 { + err = TypeError{Method: Float32Type, Encoded: getType(b[0])} + return + } + + f = math.Float32frombits(getMuint32(b)) + o = b[5:] + return +} + +// ReadBoolBytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. 
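// Editorial usage sketch (not part of the vendored file): the *Bytes readers
// above all share the same shape -- they return the decoded value plus the
// remaining, unread tail of the input slice -- so decoding is a plain
// left-to-right walk over the buffer. Assumes the caller imports
// "github.com/tinylib/msgp/msgp".
func sumFloatArray(b []byte) (float64, error) {
	// Read the array header first; 'b' is advanced past the prefix.
	sz, b, err := msgp.ReadArrayHeaderBytes(b)
	if err != nil {
		return 0, err
	}
	var total float64
	for i := uint32(0); i < sz; i++ {
		var f float64
		// Each element read returns the value and the remaining bytes.
		f, b, err = msgp.ReadFloat64Bytes(b)
		if err != nil {
			return 0, err
		}
		total += f
	}
	return total, nil
}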
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a bool) +func ReadBoolBytes(b []byte) (bool, []byte, error) { + if len(b) < 1 { + return false, b, ErrShortBytes + } + switch b[0] { + case mtrue: + return true, b[1:], nil + case mfalse: + return false, b[1:], nil + default: + return false, b, badPrefix(BoolType, b[0]) + } +} + +// ReadInt64Bytes tries to read an int64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError (not a int) +func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + i = int64(rfixint(lead)) + o = b[1:] + return + } + if isnfixint(lead) { + i = int64(rnfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + i = int64(getMint8(b)) + o = b[2:] + return + + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + i = int64(getMuint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + i = int64(getMint16(b)) + o = b[3:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + i = int64(getMuint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + i = int64(getMint32(b)) + o = b[5:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + i = int64(getMuint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + i = int64(getMint64(b)) + o = b[9:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u := getMuint64(b) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + o = b[9:] + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32Bytes tries to read an int32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int32) +func ReadInt32Bytes(b []byte) (int32, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt32 || i < math.MinInt32 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 32} + } + return int32(i), o, err +} + +// ReadInt16Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int16) +func ReadInt16Bytes(b []byte) (int16, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt16 || i < math.MinInt16 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 16} + } + return int16(i), o, err +} + +// ReadInt8Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int8) +func ReadInt8Bytes(b []byte) (int8, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt8 || i < math.MinInt8 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 8} + } + return int8(i), o, err +} + +// ReadIntBytes tries to read an int +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only) +func ReadIntBytes(b []byte) (int, []byte, error) { + if smallint { + i, b, err := ReadInt32Bytes(b) + return int(i), b, err + } + i, b, err := ReadInt64Bytes(b) + return int(i), b, err +} + +// ReadUint64Bytes tries to read a uint64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + v := int64(getMint8(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[2:] + return + + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + u = uint64(getMuint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + v := int64(getMint16(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[3:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + u = uint64(getMuint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + v := int64(getMint32(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[5:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + u = uint64(getMuint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + v := int64(getMint64(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[9:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u = getMuint64(b) + o = b[9:] + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + } +} + +// ReadUint32Bytes tries to read a uint32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint32) +func ReadUint32Bytes(b []byte) (uint32, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint32 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} + } + return uint32(v), o, err +} + +// ReadUint16Bytes tries to read a uint16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint16) +func ReadUint16Bytes(b []byte) (uint16, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint16 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} + } + return uint16(v), o, err +} + +// ReadUint8Bytes tries to read a uint8 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint8) +func ReadUint8Bytes(b []byte) (uint8, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint8 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} + } + return uint8(v), o, err +} + +// ReadUintBytes tries to read a uint +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint; 32-bit platforms only) +func ReadUintBytes(b []byte) (uint, []byte, error) { + if smallint { + u, b, err := ReadUint32Bytes(b) + return uint(u), b, err + } + u, b, err := ReadUint64Bytes(b) + return uint(u), b, err +} + +// ReadByteBytes is analogous to ReadUint8Bytes +func ReadByteBytes(b []byte) (byte, []byte, error) { + return ReadUint8Bytes(b) +} + +// ReadBytesBytes reads a 'bin' object +// from 'b' and returns its vaue and +// the remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'bin' object) +func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, scratch, false) +} + +func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = int(b[1]) + b = b[2:] + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = badPrefix(BinType, lead) + return + } + + if len(b) < read { + err = ErrShortBytes + return + } + + // zero-copy + if zc { + v = b[0:read] + o = b[read:] + return + } + + if cap(scratch) >= read { + v = scratch[0:read] + } else { + v = make([]byte, read) + } + + o = b[copy(v, b):] + return +} + +// ReadBytesZC extracts the messagepack-encoded +// binary field without copying. The returned []byte +// points to the same memory as the input slice. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'bin') +func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, nil, true) +} + +func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + var read uint32 + var skip int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = uint32(b[1]) + skip = 2 + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = uint32(big.Uint16(b[1:])) + skip = 3 + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = uint32(big.Uint32(b[1:])) + skip = 5 + + default: + err = badPrefix(BinType, lead) + return + } + + if read != uint32(len(into)) { + err = ArrayError{Wanted: uint32(len(into)), Got: read} + return + } + + o = b[skip+copy(into, b[skip:]):] + return +} + +// ReadStringZC reads a messagepack string field +// without copying. The returned []byte points +// to the same memory as the input slice. 
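// Editorial usage sketch (not part of the vendored file): the difference
// between the copying and zero-copy 'bin' readers above. ReadBytesZC aliases
// the input slice and is only safe while 'b' is neither reused nor mutated;
// ReadBytesBytes copies into 'scratch', reallocating only when scratch is too
// small. Assumes the caller imports "github.com/tinylib/msgp/msgp".
func readTwoBlobs(b []byte, scratch []byte) (alias, copied, rest []byte, err error) {
	// Zero-copy: 'alias' points into the same memory as 'b'.
	alias, rest, err = msgp.ReadBytesZC(b)
	if err != nil {
		return
	}
	// Copying: 'copied' reuses 'scratch' when it has enough capacity.
	copied, rest, err = msgp.ReadBytesBytes(rest, scratch)
	return
}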
+// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'str') +func ReadStringZC(b []byte) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + b = b[1:] + } else { + switch lead { + case mstr8: + if l < 2 { + err = ErrShortBytes + return + } + read = int(b[1]) + b = b[2:] + + case mstr16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mstr32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = TypeError{Method: StrType, Encoded: getType(lead)} + return + } + } + + if len(b) < read { + err = ErrShortBytes + return + } + + v = b[0:read] + o = b[read:] + return +} + +// ReadStringBytes reads a 'str' object +// from 'b' and returns its value and the +// remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError +func ReadStringBytes(b []byte) (string, []byte, error) { + v, o, err := ReadStringZC(b) + return string(v), o, err +} + +// ReadStringAsBytes reads a 'str' object +// into a slice of bytes. 'v' is the value of +// the 'str' object, which may reside in memory +// pointed to by 'scratch.' 'o' is the remaining bytes +// in 'b.'' +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError (unknown type marker) +func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + var tmp []byte + tmp, o, err = ReadStringZC(b) + v = append(scratch[:0], tmp...) + return +} + +// ReadComplex128Bytes reads a complex128 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex128) +// - InvalidPrefixError +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) +func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { + if len(b) < 18 { + err = ErrShortBytes + return + } + if b[0] != mfixext16 { + err = badPrefix(Complex128Type, b[0]) + return + } + if int8(b[1]) != Complex128Extension { + err = errExt(int8(b[1]), Complex128Extension) + return + } + c = complex(math.Float64frombits(big.Uint64(b[2:])), + math.Float64frombits(big.Uint64(b[10:]))) + o = b[18:] + return +} + +// ReadComplex64Bytes reads a complex64 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) +func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { + if len(b) < 10 { + err = ErrShortBytes + return + } + if b[0] != mfixext8 { + err = badPrefix(Complex64Type, b[0]) + return + } + if b[1] != Complex64Extension { + err = errExt(int8(b[1]), Complex64Extension) + return + } + c = complex(math.Float32frombits(big.Uint32(b[2:])), + math.Float32frombits(big.Uint32(b[6:]))) + o = b[10:] + return +} + +// ReadTimeBytes reads a time.Time +// extension object from 'b' and returns the +// remaining bytes. 
+// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) +func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { + if len(b) < 15 { + err = ErrShortBytes + return + } + if b[0] != mext8 || b[1] != 12 { + err = badPrefix(TimeType, b[0]) + return + } + if int8(b[2]) != TimeExtension { + err = errExt(int8(b[2]), TimeExtension) + return + } + sec, nsec := getUnix(b[3:]) + t = time.Unix(sec, int64(nsec)).Local() + o = b[15:] + return +} + +// ReadMapStrIntfBytes reads a map[string]interface{} +// out of 'b' and returns the map and remaining bytes. +// If 'old' is non-nil, the values will be read into that map. +func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + var sz uint32 + o = b + sz, o, err = ReadMapHeaderBytes(o) + + if err != nil { + return + } + + if old != nil { + for key := range old { + delete(old, key) + } + v = old + } else { + v = make(map[string]interface{}, int(sz)) + } + + for z := uint32(0); z < sz; z++ { + if len(o) < 1 { + err = ErrShortBytes + return + } + var key []byte + key, o, err = ReadMapKeyZC(o) + if err != nil { + return + } + var val interface{} + val, o, err = ReadIntfBytes(o) + if err != nil { + return + } + v[string(key)] = val + } + return +} + +// ReadIntfBytes attempts to read +// the next object out of 'b' as a raw interface{} and +// return the remaining bytes. +func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + if len(b) < 1 { + err = ErrShortBytes + return + } + + k := NextType(b) + + switch k { + case MapType: + i, o, err = ReadMapStrIntfBytes(b, nil) + return + + case ArrayType: + var sz uint32 + sz, o, err = ReadArrayHeaderBytes(b) + if err != nil { + return + } + j := make([]interface{}, int(sz)) + i = j + for d := range j { + j[d], o, err = ReadIntfBytes(o) + if err != nil { + return + } + } + return + + case Float32Type: + i, o, err = ReadFloat32Bytes(b) + return + + case Float64Type: + i, o, err = ReadFloat64Bytes(b) + return + + case IntType: + i, o, err = ReadInt64Bytes(b) + return + + case UintType: + i, o, err = ReadUint64Bytes(b) + return + + case BoolType: + i, o, err = ReadBoolBytes(b) + return + + case TimeType: + i, o, err = ReadTimeBytes(b) + return + + case Complex64Type: + i, o, err = ReadComplex64Bytes(b) + return + + case Complex128Type: + i, o, err = ReadComplex128Bytes(b) + return + + case ExtensionType: + var t int8 + t, err = peekExtension(b) + if err != nil { + return + } + // use a user-defined extension, + // if it's been registered + f, ok := extensionReg[t] + if ok { + e := f() + o, err = ReadExtensionBytes(b, e) + i = e + return + } + // last resort is a raw extension + e := RawExtension{} + e.Type = int8(t) + o, err = ReadExtensionBytes(b, &e) + i = &e + return + + case NilType: + o, err = ReadNilBytes(b) + return + + case BinType: + i, o, err = ReadBytesBytes(b, nil) + return + + case StrType: + i, o, err = ReadStringBytes(b) + return + + default: + err = InvalidPrefixError(b[0]) + return + } +} + +// Skip skips the next object in 'b' and +// returns the remaining bytes. If the object +// is a map or array, all of its elements +// will be skipped. 
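// Editorial usage sketch (not part of the vendored file): ReadIntfBytes
// (above) decodes one object at a time and hands back the unread tail, so a
// buffer holding several concatenated MessagePack objects can be drained in a
// loop. Assumes the caller imports "github.com/tinylib/msgp/msgp".
func decodeAll(b []byte) ([]interface{}, error) {
	var out []interface{}
	for len(b) > 0 {
		var v interface{}
		var err error
		// Each call consumes exactly one object from the front of 'b'.
		v, b, err = msgp.ReadIntfBytes(b)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}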
+// Possible Errors: +// - ErrShortBytes (not enough bytes in b) +// - InvalidPrefixError (bad encoding) +func Skip(b []byte) ([]byte, error) { + sz, asz, err := getSize(b) + if err != nil { + return b, err + } + if uintptr(len(b)) < sz { + return b, ErrShortBytes + } + b = b[sz:] + for asz > 0 { + b, err = Skip(b) + if err != nil { + return b, err + } + asz-- + } + return b, nil +} + +// returns (skip N bytes, skip M objects, error) +func getSize(b []byte) (uintptr, uintptr, error) { + l := len(b) + if l == 0 { + return 0, 0, ErrShortBytes + } + lead := b[0] + spec := &sizes[lead] // get type information + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { // fixed composites + return uintptr(size), uintptr(mode), nil + } + if l < int(size) { + return 0, 0, ErrShortBytes + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go new file mode 100644 index 0000000..ce2f8b1 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/size.go @@ -0,0 +1,38 @@ +package msgp + +// The sizes provided +// are the worst-case +// encoded sizes for +// each type. For variable- +// length types ([]byte, string), +// the total encoded size is +// the prefix size plus the +// length of the object. +const ( + Int64Size = 9 + IntSize = Int64Size + UintSize = Int64Size + Int8Size = 2 + Int16Size = 3 + Int32Size = 5 + Uint8Size = 2 + ByteSize = Uint8Size + Uint16Size = 3 + Uint32Size = 5 + Uint64Size = Int64Size + Float64Size = 9 + Float32Size = 5 + Complex64Size = 10 + Complex128Size = 18 + + TimeSize = 15 + BoolSize = 1 + NilSize = 1 + + MapHeaderSize = 5 + ArrayHeaderSize = 5 + + BytesPrefixSize = 5 + StringPrefixSize = 5 + ExtensionPrefixSize = 6 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go new file mode 100644 index 0000000..d9fb353 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -0,0 +1,36 @@ +// +build !purego,!appengine + +package msgp + +import ( + "unsafe" +) + +// NOTE: +// all of the definition in this file +// should be repeated in appengine.go, +// but without using unsafe + +const ( + // spec says int and uint are always + // the same size, but that int/uint + // size may not be machine word size + smallint = unsafe.Sizeof(int(0)) == 4 +) + +// UnsafeString returns the byte slice as a volatile string +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. 
+func UnsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// UnsafeBytes returns the string as a byte slice +// +// Deprecated: +// Since this code is no longer used by the code generator, +// UnsafeBytes(s) is precisely equivalent to []byte(s) +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go new file mode 100644 index 0000000..407ec1f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write.go @@ -0,0 +1,861 @@ +package msgp + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "sync" + "time" +) + +const ( + // min buffer size for the writer + minWriterSize = 18 +) + +// Sizer is an interface implemented +// by types that can estimate their +// size when MessagePack encoded. +// This interface is optional, but +// encoding/marshaling implementations +// may use this as a way to pre-allocate +// memory for serialization. +type Sizer interface { + Msgsize() int +} + +var ( + // Nowhere is an io.Writer to nowhere + Nowhere io.Writer = nwhere{} + + btsType = reflect.TypeOf(([]byte)(nil)) + writerPool = sync.Pool{ + New: func() interface{} { + return &Writer{buf: make([]byte, 2048)} + }, + } +) + +func popWriter(w io.Writer) *Writer { + wr := writerPool.Get().(*Writer) + wr.Reset(w) + return wr +} + +func pushWriter(wr *Writer) { + wr.w = nil + wr.wloc = 0 + writerPool.Put(wr) +} + +// freeW frees a writer for use +// by other processes. It is not necessary +// to call freeW on a writer. However, maintaining +// a reference to a *Writer after calling freeW on +// it will cause undefined behavior. +func freeW(w *Writer) { pushWriter(w) } + +// Require ensures that cap(old)-len(old) >= extra. +func Require(old []byte, extra int) []byte { + l := len(old) + c := cap(old) + r := l + extra + if c >= r { + return old + } else if l == 0 { + return make([]byte, 0, extra) + } + // the new size is the greater + // of double the old capacity + // and the sum of the old length + // and the number of new bytes + // necessary. + c <<= 1 + if c < r { + c = r + } + n := make([]byte, l, c) + copy(n, old) + return n +} + +// nowhere writer +type nwhere struct{} + +func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } + +// Marshaler is the interface implemented +// by types that know how to marshal themselves +// as MessagePack. MarshalMsg appends the marshalled +// form of the object to the provided +// byte slice, returning the extended +// slice and any errors encountered. +type Marshaler interface { + MarshalMsg([]byte) ([]byte, error) +} + +// Encodable is the interface implemented +// by types that know how to write themselves +// as MessagePack using a *msgp.Writer. +type Encodable interface { + EncodeMsg(*Writer) error +} + +// Writer is a buffered writer +// that can be used to write +// MessagePack objects to an io.Writer. +// You must call *Writer.Flush() in order +// to flush all of the buffered data +// to the underlying writer. +type Writer struct { + w io.Writer + buf []byte + wloc int +} + +// NewWriter returns a new *Writer. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return popWriter(w) +} + +// NewWriterSize returns a writer with a custom buffer size. 
+func NewWriterSize(w io.Writer, sz int) *Writer { + // we must be able to require() 'minWriterSize' + // contiguous bytes, so that is the + // practical minimum buffer size + if sz < minWriterSize { + sz = minWriterSize + } + buf := make([]byte, sz) + return NewWriterBuf(w, buf) +} + +// NewWriterBuf returns a writer with a provided buffer. +// 'buf' is not used when the capacity is smaller than 18, +// custom buffer is allocated instead. +func NewWriterBuf(w io.Writer, buf []byte) *Writer { + if cap(buf) < minWriterSize { + buf = make([]byte, minWriterSize) + } + buf = buf[:cap(buf)] + return &Writer{ + w: w, + buf: buf, + } +} + +// Encode encodes an Encodable to an io.Writer. +func Encode(w io.Writer, e Encodable) error { + wr := NewWriter(w) + err := e.EncodeMsg(wr) + if err == nil { + err = wr.Flush() + } + freeW(wr) + return err +} + +func (mw *Writer) flush() error { + if mw.wloc == 0 { + return nil + } + n, err := mw.w.Write(mw.buf[:mw.wloc]) + if err != nil { + if n > 0 { + mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) + } + return err + } + mw.wloc = 0 + return nil +} + +// Flush flushes all of the buffered +// data to the underlying writer. +func (mw *Writer) Flush() error { return mw.flush() } + +// Buffered returns the number bytes in the write buffer +func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) bufsize() int { return len(mw.buf) } + +// NOTE: this should only be called with +// a number that is guaranteed to be less than +// len(mw.buf). typically, it is called with a constant. +// +// NOTE: this is a hot code path +func (mw *Writer) require(n int) (int, error) { + c := len(mw.buf) + wl := mw.wloc + if c-wl < n { + if err := mw.flush(); err != nil { + return 0, err + } + wl = mw.wloc + } + mw.wloc += n + return wl, nil +} + +func (mw *Writer) Append(b ...byte) error { + if mw.avail() < len(b) { + err := mw.flush() + if err != nil { + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], b) + return nil +} + +// push one byte onto the buffer +// +// NOTE: this is a hot code path +func (mw *Writer) push(b byte) error { + if mw.wloc == len(mw.buf) { + if err := mw.flush(); err != nil { + return err + } + } + mw.buf[mw.wloc] = b + mw.wloc++ + return nil +} + +func (mw *Writer) prefix8(b byte, u uint8) error { + const need = 2 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu8(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix16(b byte, u uint16) error { + const need = 3 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu16(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix32(b byte, u uint32) error { + const need = 5 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu32(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix64(b byte, u uint64) error { + const need = 9 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu64(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +// Write implements io.Writer, and writes +// data directly to the buffer. 
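// Editorial usage sketch (not part of the vendored file): the Encode helper
// above borrows a pooled Writer, calls EncodeMsg on the value, flushes, and
// returns the Writer to the pool. msgp.Raw (defined in read_bytes.go earlier
// in this patch) already implements Encodable, so pre-encoded MessagePack can
// be spliced into another stream this way. Assumes the caller imports "bytes"
// and "github.com/tinylib/msgp/msgp".
func copyRaw(dst *bytes.Buffer, pre msgp.Raw) error {
	// Raw.EncodeMsg writes the raw bytes (or 'nil' when empty) and the
	// helper takes care of flushing the buffered Writer into dst.
	return msgp.Encode(dst, pre)
}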
+func (mw *Writer) Write(p []byte) (int, error) { + l := len(p) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return 0, err + } + if l > len(mw.buf) { + return mw.w.Write(p) + } + } + mw.wloc += copy(mw.buf[mw.wloc:], p) + return l, nil +} + +// implements io.WriteString +func (mw *Writer) writeString(s string) error { + l := len(s) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return err + } + if l > len(mw.buf) { + _, err := io.WriteString(mw.w, s) + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], s) + return nil +} + +// Reset changes the underlying writer used by the Writer +func (mw *Writer) Reset(w io.Writer) { + mw.buf = mw.buf[:cap(mw.buf)] + mw.w = w + mw.wloc = 0 +} + +// WriteMapHeader writes a map header of the given +// size to the writer +func (mw *Writer) WriteMapHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixmap(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(mmap16, uint16(sz)) + default: + return mw.prefix32(mmap32, sz) + } +} + +// WriteArrayHeader writes an array header of the +// given size to the writer +func (mw *Writer) WriteArrayHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixarray(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(marray16, uint16(sz)) + default: + return mw.prefix32(marray32, sz) + } +} + +// WriteNil writes a nil byte to the buffer +func (mw *Writer) WriteNil() error { + return mw.push(mnil) +} + +// WriteFloat64 writes a float64 to the writer +func (mw *Writer) WriteFloat64(f float64) error { + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat32 writes a float32 to the writer +func (mw *Writer) WriteFloat32(f float32) error { + return mw.prefix32(mfloat32, math.Float32bits(f)) +} + +// WriteInt64 writes an int64 to the writer +func (mw *Writer) WriteInt64(i int64) error { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return mw.push(wfixint(uint8(i))) + case i <= math.MaxInt16: + return mw.prefix16(mint16, uint16(i)) + case i <= math.MaxInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } + } + switch { + case i >= -32: + return mw.push(wnfixint(int8(i))) + case i >= math.MinInt8: + return mw.prefix8(mint8, uint8(i)) + case i >= math.MinInt16: + return mw.prefix16(mint16, uint16(i)) + case i >= math.MinInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } +} + +// WriteInt8 writes an int8 to the writer +func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } + +// WriteInt16 writes an int16 to the writer +func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } + +// WriteInt32 writes an int32 to the writer +func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } + +// WriteInt writes an int to the writer +func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } + +// WriteUint64 writes a uint64 to the writer +func (mw *Writer) WriteUint64(u uint64) error { + switch { + case u <= (1<<7)-1: + return mw.push(wfixint(uint8(u))) + case u <= math.MaxUint8: + return mw.prefix8(muint8, uint8(u)) + case u <= math.MaxUint16: + return mw.prefix16(muint16, uint16(u)) + case u <= math.MaxUint32: + return mw.prefix32(muint32, uint32(u)) + default: + return mw.prefix64(muint64, u) + } +} + +// WriteByte is analogous to WriteUint8 +func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } + +// WriteUint8 writes a uint8 to 
the writer +func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint16 writes a uint16 to the writer +func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint32 writes a uint32 to the writer +func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint writes a uint to the writer +func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } + +// WriteBytes writes binary as 'bin' to the writer +func (mw *Writer) WriteBytes(b []byte) error { + sz := uint32(len(b)) + var err error + switch { + case sz <= math.MaxUint8: + err = mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mbin16, uint16(sz)) + default: + err = mw.prefix32(mbin32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(b) + return err +} + +// WriteBytesHeader writes just the size header +// of a MessagePack 'bin' object. The user is responsible +// for then writing 'sz' more bytes into the stream. +func (mw *Writer) WriteBytesHeader(sz uint32) error { + switch { + case sz <= math.MaxUint8: + return mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mbin16, uint16(sz)) + default: + return mw.prefix32(mbin32, sz) + } +} + +// WriteBool writes a bool to the writer +func (mw *Writer) WriteBool(b bool) error { + if b { + return mw.push(mtrue) + } + return mw.push(mfalse) +} + +// WriteString writes a messagepack string to the writer. +// (This is NOT an implementation of io.StringWriter) +func (mw *Writer) WriteString(s string) error { + sz := uint32(len(s)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + return mw.writeString(s) +} + +// WriteStringHeader writes just the string size +// header of a MessagePack 'str' object. The user +// is responsible for writing 'sz' more valid UTF-8 +// bytes to the stream. +func (mw *Writer) WriteStringHeader(sz uint32) error { + switch { + case sz <= 31: + return mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + return mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mstr16, uint16(sz)) + default: + return mw.prefix32(mstr32, sz) + } +} + +// WriteStringFromBytes writes a 'str' object +// from a []byte. 
+func (mw *Writer) WriteStringFromBytes(str []byte) error { + sz := uint32(len(str)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(str) + return err +} + +// WriteComplex64 writes a complex64 to the writer +func (mw *Writer) WriteComplex64(f complex64) error { + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = Complex64Extension + big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) + big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) + return nil +} + +// WriteComplex128 writes a complex128 to the writer +func (mw *Writer) WriteComplex128(f complex128) error { + o, err := mw.require(18) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = Complex128Extension + big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) + big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) + return nil +} + +// WriteMapStrStr writes a map[string]string to the writer +func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteString(val) + if err != nil { + return + } + } + return nil +} + +// WriteMapStrIntf writes a map[string]interface to the writer +func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteIntf(val) + if err != nil { + return + } + } + return +} + +// WriteTime writes a time.Time object to the wire. +// +// Time is encoded as Unix time, which means that +// location (time zone) data is removed from the object. +// The encoded object itself is 12 bytes: 8 bytes for +// a big-endian 64-bit integer denoting seconds +// elapsed since "zero" Unix time, followed by 4 bytes +// for a big-endian 32-bit signed integer denoting +// the nanosecond offset of the time. This encoding +// is intended to ease portability across languages. +// (Note that this is *not* the standard time.Time +// binary encoding, because its implementation relies +// heavily on the internal representation used by the +// time package.) +func (mw *Writer) WriteTime(t time.Time) error { + t = t.UTC() + o, err := mw.require(15) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = TimeExtension + putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) + return nil +} + +// WriteIntf writes the concrete type of 'v'. 
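// Editorial usage sketch (not part of the vendored file): hand-rolling a
// small map with the Write* methods above. The map header promises a number
// of key/value pairs and the caller must then write exactly that many; the
// buffered data only reaches the underlying io.Writer on Flush. Assumes the
// caller imports "bytes", "time" and "github.com/tinylib/msgp/msgp".
func encodeEvent(name string, at time.Time) ([]byte, error) {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)
	// Two key/value pairs follow the header.
	if err := w.WriteMapHeader(2); err != nil {
		return nil, err
	}
	if err := w.WriteString("name"); err != nil {
		return nil, err
	}
	if err := w.WriteString(name); err != nil {
		return nil, err
	}
	if err := w.WriteString("at"); err != nil {
		return nil, err
	}
	// WriteTime stores UTC seconds plus nanoseconds in a 12-byte extension.
	if err := w.WriteTime(at); err != nil {
		return nil, err
	}
	// Flush pushes the buffered bytes into 'buf'.
	if err := w.Flush(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}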
+// WriteIntf will error if 'v' is not one of the following: +// - A bool, float, string, []byte, int, uint, or complex +// - A map of supported types (with string keys) +// - An array or slice of supported types +// - A pointer to a supported type +// - A type that satisfies the msgp.Encodable interface +// - A type that satisfies the msgp.Extension interface +func (mw *Writer) WriteIntf(v interface{}) error { + if v == nil { + return mw.WriteNil() + } + switch v := v.(type) { + + // preferred interfaces + + case Encodable: + return v.EncodeMsg(mw) + case Extension: + return mw.WriteExtension(v) + + // concrete types + + case bool: + return mw.WriteBool(v) + case float32: + return mw.WriteFloat32(v) + case float64: + return mw.WriteFloat64(v) + case complex64: + return mw.WriteComplex64(v) + case complex128: + return mw.WriteComplex128(v) + case uint8: + return mw.WriteUint8(v) + case uint16: + return mw.WriteUint16(v) + case uint32: + return mw.WriteUint32(v) + case uint64: + return mw.WriteUint64(v) + case uint: + return mw.WriteUint(v) + case int8: + return mw.WriteInt8(v) + case int16: + return mw.WriteInt16(v) + case int32: + return mw.WriteInt32(v) + case int64: + return mw.WriteInt64(v) + case int: + return mw.WriteInt(v) + case string: + return mw.WriteString(v) + case []byte: + return mw.WriteBytes(v) + case map[string]string: + return mw.WriteMapStrStr(v) + case map[string]interface{}: + return mw.WriteMapStrIntf(v) + case time.Time: + return mw.WriteTime(v) + } + + val := reflect.ValueOf(v) + if !isSupported(val.Kind()) || !val.IsValid() { + return fmt.Errorf("msgp: type %s not supported", val) + } + + switch val.Kind() { + case reflect.Ptr: + if val.IsNil() { + return mw.WriteNil() + } + return mw.WriteIntf(val.Elem().Interface()) + case reflect.Slice: + return mw.writeSlice(val) + case reflect.Map: + return mw.writeMap(val) + } + return &ErrUnsupportedType{T: val.Type()} +} + +func (mw *Writer) writeMap(v reflect.Value) (err error) { + if v.Type().Key().Kind() != reflect.String { + return errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + err = mw.WriteMapHeader(uint32(len(ks))) + if err != nil { + return + } + for _, key := range ks { + val := v.MapIndex(key) + err = mw.WriteString(key.String()) + if err != nil { + return + } + err = mw.WriteIntf(val.Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeSlice(v reflect.Value) (err error) { + // is []byte + if v.Type().ConvertibleTo(btsType) { + return mw.WriteBytes(v.Bytes()) + } + + sz := uint32(v.Len()) + err = mw.WriteArrayHeader(sz) + if err != nil { + return + } + for i := uint32(0); i < sz; i++ { + err = mw.WriteIntf(v.Index(int(i)).Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeStruct(v reflect.Value) error { + if enc, ok := v.Interface().(Encodable); ok { + return enc.EncodeMsg(mw) + } + return fmt.Errorf("msgp: unsupported type: %s", v.Type()) +} + +func (mw *Writer) writeVal(v reflect.Value) error { + if !isSupported(v.Kind()) { + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) + } + + // shortcut for nil values + if v.IsNil() { + return mw.WriteNil() + } + switch v.Kind() { + case reflect.Bool: + return mw.WriteBool(v.Bool()) + + case reflect.Float32, reflect.Float64: + return mw.WriteFloat64(v.Float()) + + case reflect.Complex64, reflect.Complex128: + return mw.WriteComplex128(v.Complex()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: + return mw.WriteInt64(v.Int()) + 
+ case reflect.Interface, reflect.Ptr: + if v.IsNil() { + mw.WriteNil() + } + return mw.writeVal(v.Elem()) + + case reflect.Map: + return mw.writeMap(v) + + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8: + return mw.WriteUint64(v.Uint()) + + case reflect.String: + return mw.WriteString(v.String()) + + case reflect.Slice, reflect.Array: + return mw.writeSlice(v) + + case reflect.Struct: + return mw.writeStruct(v) + + } + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) +} + +// is the reflect.Kind encodable? +func isSupported(k reflect.Kind) bool { + switch k { + case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: + return false + default: + return true + } +} + +// GuessSize guesses the size of the underlying +// value of 'i'. If the underlying value is not +// a simple builtin (or []byte), GuessSize defaults +// to 512. +func GuessSize(i interface{}) int { + if i == nil { + return NilSize + } + + switch i := i.(type) { + case Sizer: + return i.Msgsize() + case Extension: + return ExtensionPrefixSize + i.Len() + case float64: + return Float64Size + case float32: + return Float32Size + case uint8, uint16, uint32, uint64, uint: + return UintSize + case int8, int16, int32, int64, int: + return IntSize + case []byte: + return BytesPrefixSize + len(i) + case string: + return StringPrefixSize + len(i) + case complex64: + return Complex64Size + case complex128: + return Complex128Size + case bool: + return BoolSize + case map[string]interface{}: + s := MapHeaderSize + for key, val := range i { + s += StringPrefixSize + len(key) + GuessSize(val) + } + return s + case map[string]string: + s := MapHeaderSize + for key, val := range i { + s += 2*StringPrefixSize + len(key) + len(val) + } + return s + default: + return 512 + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go new file mode 100644 index 0000000..eaa03c4 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go @@ -0,0 +1,411 @@ +package msgp + +import ( + "math" + "reflect" + "time" +) + +// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) +func ensure(b []byte, sz int) ([]byte, int) { + l := len(b) + c := cap(b) + if c-l < sz { + o := make([]byte, (2*c)+sz) // exponential growth + n := copy(o, b) + return o[:n+sz], n + } + return b[:l+sz], l +} + +// AppendMapHeader appends a map header with the +// given size to the slice +func AppendMapHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixmap(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], mmap16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], mmap32, sz) + return o + } +} + +// AppendArrayHeader appends an array header with +// the given size to the slice +func AppendArrayHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixarray(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], marray16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], marray32, sz) + return o + } +} + +// AppendNil appends a 'nil' byte to the slice +func AppendNil(b []byte) []byte { return append(b, mnil) } + +// AppendFloat64 appends a float64 to the slice +func AppendFloat64(b []byte, f float64) []byte { + o, n := ensure(b, Float64Size) + prefixu64(o[n:], mfloat64, math.Float64bits(f)) + return o +} + +// AppendFloat32 appends a float32 to the slice 
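// Editorial usage sketch (not part of the vendored file): the Append*
// functions above mirror the Writer methods but grow a caller-owned []byte
// instead of writing to a stream, which is the style generated MarshalMsg
// code builds on. Assumes the caller imports "github.com/tinylib/msgp/msgp".
func appendSamples(b []byte, samples []float64) []byte {
	// One array header, then one float64 element per sample; each call
	// returns the (possibly reallocated) slice.
	b = msgp.AppendArrayHeader(b, uint32(len(samples)))
	for _, s := range samples {
		b = msgp.AppendFloat64(b, s)
	}
	return b
}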
+func AppendFloat32(b []byte, f float32) []byte { + o, n := ensure(b, Float32Size) + prefixu32(o[n:], mfloat32, math.Float32bits(f)) + return o +} + +// AppendInt64 appends an int64 to the slice +func AppendInt64(b []byte, i int64) []byte { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return append(b, wfixint(uint8(i))) + case i <= math.MaxInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i <= math.MaxInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } + } + switch { + case i >= -32: + return append(b, wnfixint(int8(i))) + case i >= math.MinInt8: + o, n := ensure(b, 2) + putMint8(o[n:], int8(i)) + return o + case i >= math.MinInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i >= math.MinInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } +} + +// AppendInt appends an int to the slice +func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt8 appends an int8 to the slice +func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt16 appends an int16 to the slice +func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt32 appends an int32 to the slice +func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } + +// AppendUint64 appends a uint64 to the slice +func AppendUint64(b []byte, u uint64) []byte { + switch { + case u <= (1<<7)-1: + return append(b, wfixint(uint8(u))) + + case u <= math.MaxUint8: + o, n := ensure(b, 2) + putMuint8(o[n:], uint8(u)) + return o + + case u <= math.MaxUint16: + o, n := ensure(b, 3) + putMuint16(o[n:], uint16(u)) + return o + + case u <= math.MaxUint32: + o, n := ensure(b, 5) + putMuint32(o[n:], uint32(u)) + return o + + default: + o, n := ensure(b, 9) + putMuint64(o[n:], u) + return o + + } +} + +// AppendUint appends a uint to the slice +func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint8 appends a uint8 to the slice +func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } + +// AppendByte is analogous to AppendUint8 +func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } + +// AppendUint16 appends a uint16 to the slice +func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint32 appends a uint32 to the slice +func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } + +// AppendBytes appends bytes to the slice as MessagePack 'bin' data +func AppendBytes(b []byte, bts []byte) []byte { + sz := len(bts) + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mbin8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mbin16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mbin32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], bts)] +} + +// AppendBool appends a bool to the slice +func AppendBool(b []byte, t bool) []byte { + if t { + return append(b, mtrue) + } + return append(b, mfalse) +} + +// AppendString appends a string as a MessagePack 'str' to the slice +func AppendString(b []byte, s string) []byte { + sz := len(s) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = 
wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], s)] +} + +// AppendStringFromBytes appends a []byte +// as a MessagePack 'str' to the slice 'b.' +func AppendStringFromBytes(b []byte, str []byte) []byte { + sz := len(str) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], str)] +} + +// AppendComplex64 appends a complex64 to the slice as a MessagePack extension +func AppendComplex64(b []byte, c complex64) []byte { + o, n := ensure(b, Complex64Size) + o[n] = mfixext8 + o[n+1] = Complex64Extension + big.PutUint32(o[n+2:], math.Float32bits(real(c))) + big.PutUint32(o[n+6:], math.Float32bits(imag(c))) + return o +} + +// AppendComplex128 appends a complex128 to the slice as a MessagePack extension +func AppendComplex128(b []byte, c complex128) []byte { + o, n := ensure(b, Complex128Size) + o[n] = mfixext16 + o[n+1] = Complex128Extension + big.PutUint64(o[n+2:], math.Float64bits(real(c))) + big.PutUint64(o[n+10:], math.Float64bits(imag(c))) + return o +} + +// AppendTime appends a time.Time to the slice as a MessagePack extension +func AppendTime(b []byte, t time.Time) []byte { + o, n := ensure(b, TimeSize) + t = t.UTC() + o[n] = mext8 + o[n+1] = 12 + o[n+2] = TimeExtension + putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) + return o +} + +// AppendMapStrStr appends a map[string]string to the slice +// as a MessagePack map with 'str'-type keys and values +func AppendMapStrStr(b []byte, m map[string]string) []byte { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + for key, val := range m { + b = AppendString(b, key) + b = AppendString(b, val) + } + return b +} + +// AppendMapStrIntf appends a map[string]interface{} to the slice +// as a MessagePack map with 'str'-type keys. +func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + var err error + for key, val := range m { + b = AppendString(b, key) + b, err = AppendIntf(b, val) + if err != nil { + return b, err + } + } + return b, nil +} + +// AppendIntf appends the concrete type of 'i' to the +// provided []byte. 
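// Editorial usage sketch (not part of the vendored file): a round trip
// through the slice-based API, encoding with AppendMapStrIntf (above) and
// decoding with ReadMapStrIntfBytes from read_bytes.go. Note that decoded
// numbers come back as int64/uint64/float64 rather than their original Go
// types. Assumes the caller imports "github.com/tinylib/msgp/msgp".
func roundTrip(m map[string]interface{}) (map[string]interface{}, error) {
	b, err := msgp.AppendMapStrIntf(nil, m)
	if err != nil {
		return nil, err
	}
	out, _, err := msgp.ReadMapStrIntfBytes(b, nil)
	return out, err
}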
'i' must be one of the following: +// - 'nil' +// - A bool, float, string, []byte, int, uint, or complex +// - A map[string]interface{} or map[string]string +// - A []T, where T is another supported type +// - A *T, where T is another supported type +// - A type that satisfieds the msgp.Marshaler interface +// - A type that satisfies the msgp.Extension interface +func AppendIntf(b []byte, i interface{}) ([]byte, error) { + if i == nil { + return AppendNil(b), nil + } + + // all the concrete types + // for which we have methods + switch i := i.(type) { + case Marshaler: + return i.MarshalMsg(b) + case Extension: + return AppendExtension(b, i) + case bool: + return AppendBool(b, i), nil + case float32: + return AppendFloat32(b, i), nil + case float64: + return AppendFloat64(b, i), nil + case complex64: + return AppendComplex64(b, i), nil + case complex128: + return AppendComplex128(b, i), nil + case string: + return AppendString(b, i), nil + case []byte: + return AppendBytes(b, i), nil + case int8: + return AppendInt8(b, i), nil + case int16: + return AppendInt16(b, i), nil + case int32: + return AppendInt32(b, i), nil + case int64: + return AppendInt64(b, i), nil + case int: + return AppendInt64(b, int64(i)), nil + case uint: + return AppendUint64(b, uint64(i)), nil + case uint8: + return AppendUint8(b, i), nil + case uint16: + return AppendUint16(b, i), nil + case uint32: + return AppendUint32(b, i), nil + case uint64: + return AppendUint64(b, i), nil + case time.Time: + return AppendTime(b, i), nil + case map[string]interface{}: + return AppendMapStrIntf(b, i) + case map[string]string: + return AppendMapStrStr(b, i), nil + case []interface{}: + b = AppendArrayHeader(b, uint32(len(i))) + var err error + for _, k := range i { + b, err = AppendIntf(b, k) + if err != nil { + return b, err + } + } + return b, nil + } + + var err error + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Array, reflect.Slice: + l := v.Len() + b = AppendArrayHeader(b, uint32(l)) + for i := 0; i < l; i++ { + b, err = AppendIntf(b, v.Index(i).Interface()) + if err != nil { + return b, err + } + } + return b, nil + case reflect.Ptr: + if v.IsNil() { + return AppendNil(b), err + } + b, err = AppendIntf(b, v.Elem().Interface()) + return b, err + default: + return b, &ErrUnsupportedType{T: v.Type()} + } +} diff --git a/vendor/github.com/wiggin77/cfg/.gitignore b/vendor/github.com/wiggin77/cfg/.gitignore new file mode 100644 index 0000000..f1c181e --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/wiggin77/cfg/.travis.yml b/vendor/github.com/wiggin77/cfg/.travis.yml new file mode 100644 index 0000000..9899b38 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +before_script: + - go vet ./... 
+ \ No newline at end of file diff --git a/vendor/github.com/wiggin77/cfg/LICENSE b/vendor/github.com/wiggin77/cfg/LICENSE new file mode 100644 index 0000000..2b0bf7e --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 wiggin77 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/wiggin77/cfg/README.md b/vendor/github.com/wiggin77/cfg/README.md new file mode 100644 index 0000000..583a82c --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/README.md @@ -0,0 +1,43 @@ +# cfg + +[![GoDoc](https://godoc.org/github.com/wiggin77/cfg?status.svg)](https://godoc.org/github.com/wiggin77/cfg) +[![Build Status](https://travis-ci.org/wiggin77/cfg.svg?branch=master)](https://travis-ci.org/wiggin77/cfg) + +Go package for app configuration. Supports chained configuration sources for multiple levels of defaults. +Includes APIs for loading Linux style configuration files (name/value pairs) or INI files, map based properties, +or easily creating new configuration sources (e.g. loading from a database). + +Supports monitoring configuration sources for changes, hot loading properties, and notifying listeners of changes. + +## Usage + +```Go +config := &cfg.Config{} +defer config.Shutdown() // stops monitoring + +// load file via filespec string, os.File +src, err := cfg.NewSrcFileFromFilespec("./myfile.conf") +if err != nil { + return err +} +// add src to top of chain, meaning first searched +config.PrependSource(src) + +// fetch prop 'retries', default to 3 if not found +val, _ := config.Int("retries", 3) +``` + +See [example](./example_test.go) for a more complete example, including listening for configuration changes. + +Config API parses the following data types: + +| type | method | example property values | +| ------- | ------ | -------- | +| string | Config.String | test, "" | +| int | Config.Int | -1, 77, 0 | +| int64 | Config.Int64 | -9223372036854775, 372036854775808 | +| float64 | Config.Float64 | -77.3456, 95642331.1 | +| bool | Config.Bool | T,t,true,True,1,0,False,false,f,F | +| time.Duration | Config.Duration | "10ms", "2 hours", "5 min" * | + +\* Units of measure supported: ms, sec, min, hour, day, week, year.
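Editor's note: to make the chained-source and change-listener flow described in the README above concrete, here is a minimal, illustrative sketch. It is not part of the vendored package or of this patch; it only uses the `cfg` APIs added by the files below (`NewSrcMapFromMap`, `AppendSource`, `AddChangedListener`, `Int`, `Duration`, `Put`), and the property names and values (`retries`, `timeout`) are invented for the example.

```Go
package main

import (
	"fmt"
	"time"

	"github.com/wiggin77/cfg"
)

// listener implements cfg.ChangedListener and is notified whenever a
// monitored source reports changed properties.
type listener struct{}

func (listener) ConfigChanged(config *cfg.Config, src cfg.SourceMonitored) {
	fmt.Println("config changed")
}

func main() {
	config := &cfg.Config{}
	defer config.Shutdown() // stops monitoring goroutines

	// Map-backed source holding defaults; appended, so it is searched last.
	defaults := cfg.NewSrcMapFromMap(map[string]string{
		"retries": "3",
		"timeout": "30 sec",
	})
	config.AppendSource(defaults)
	config.AddChangedListener(listener{})

	retries, _ := config.Int("retries", 1)                // 3
	timeout, _ := config.Duration("timeout", time.Second) // 30s
	fmt.Println(retries, timeout)

	// Updating the source bumps its last-modified time; the monitor
	// goroutine (default frequency: one minute) reloads the properties
	// and notifies registered listeners.
	defaults.Put("retries", "5")
}
```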
diff --git a/vendor/github.com/wiggin77/cfg/config.go b/vendor/github.com/wiggin77/cfg/config.go new file mode 100644 index 0000000..0e95810 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/config.go @@ -0,0 +1,366 @@ +package cfg + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/wiggin77/cfg/timeconv" +) + +// ErrNotFound returned when an operation is attempted on a +// resource that doesn't exist, such as fetching a non-existing +// property name. +var ErrNotFound = errors.New("not found") + +type sourceEntry struct { + src Source + props map[string]string +} + +// Config provides methods for retrieving property values from one or more +// configuration sources. +type Config struct { + mutexSrc sync.RWMutex + mutexListeners sync.RWMutex + srcs []*sourceEntry + chgListeners []ChangedListener + shutdown chan interface{} + wantPanicOnError bool +} + +// PrependSource inserts one or more `Sources` at the beginning of +// the list of sources such that the first source will be the +// source checked first when resolving a property value. +func (config *Config) PrependSource(srcs ...Source) { + arr := config.wrapSources(srcs...) + + config.mutexSrc.Lock() + if config.shutdown == nil { + config.shutdown = make(chan interface{}) + } + config.srcs = append(arr, config.srcs...) + config.mutexSrc.Unlock() + + for _, se := range arr { + if _, ok := se.src.(SourceMonitored); ok { + config.monitor(se) + } + } +} + +// AppendSource appends one or more `Sources` at the end of +// the list of sources such that the last source will be the +// source checked last when resolving a property value. +func (config *Config) AppendSource(srcs ...Source) { + arr := config.wrapSources(srcs...) + + config.mutexSrc.Lock() + if config.shutdown == nil { + config.shutdown = make(chan interface{}) + } + config.srcs = append(config.srcs, arr...) + config.mutexSrc.Unlock() + + for _, se := range arr { + if _, ok := se.src.(SourceMonitored); ok { + config.monitor(se) + } + } +} + +// wrapSources wraps one or more Source's and returns +// them as an array of `sourceEntry`. +func (config *Config) wrapSources(srcs ...Source) []*sourceEntry { + arr := make([]*sourceEntry, 0, len(srcs)) + for _, src := range srcs { + se := &sourceEntry{src: src} + config.reloadProps(se) + arr = append(arr, se) + } + return arr +} + +// SetWantPanicOnError sets the flag determining if Config +// should panic when `GetProps` or `GetLastModified` errors +// for a `Source`. +func (config *Config) SetWantPanicOnError(b bool) { + config.mutexSrc.Lock() + config.wantPanicOnError = b + config.mutexSrc.Unlock() +} + +// ShouldPanicOnError gets the flag determining if Config +// should panic when `GetProps` or `GetLastModified` errors +// for a `Source`. +func (config *Config) ShouldPanicOnError() (b bool) { + config.mutexSrc.RLock() + b = config.wantPanicOnError + config.mutexSrc.RUnlock() + return b +} + +// getProp returns the value of a named property. +// Each `Source` is checked, in the order created by adding via +// `AppendSource` and `PrependSource`, until a value for the +// property is found. +func (config *Config) getProp(name string) (val string, ok bool) { + config.mutexSrc.RLock() + defer config.mutexSrc.RUnlock() + + var s string + for _, se := range config.srcs { + if se.props != nil { + if s, ok = se.props[name]; ok { + val = strings.TrimSpace(s) + return + } + } + } + return +} + +// String returns the value of the named prop as a string. 
+// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +func (config *Config) String(name string, def string) (val string, err error) { + if v, ok := config.getProp(name); ok { + val = v + err = nil + return + } + + err = ErrNotFound + val = def + return +} + +// Int returns the value of the named prop as an `int`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Int(name string, def int) (val int, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + var i int64 + if i, err = strconv.ParseInt(s, 10, 32); err == nil { + val = int(i) + } + } + if err != nil { + val = def + } + return +} + +// Int64 returns the value of the named prop as an `int64`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Int64(name string, def int64) (val int64, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + val, err = strconv.ParseInt(s, 10, 64) + } + if err != nil { + val = def + } + return +} + +// Float64 returns the value of the named prop as a `float64`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Float64(name string, def float64) (val float64, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + val, err = strconv.ParseFloat(s, 64) + } + if err != nil { + val = def + } + return +} + +// Bool returns the value of the named prop as a `bool`. +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// Supports (t, true, 1, y, yes) for true, and (f, false, 0, n, no) for false, +// all case-insensitive. +// +// See config.String +func (config *Config) Bool(name string, def bool) (val bool, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + switch strings.ToLower(s) { + case "t", "true", "1", "y", "yes": + val = true + case "f", "false", "0", "n", "no": + val = false + default: + err = errors.New("invalid syntax") + } + } + if err != nil { + val = def + } + return +} + +// Duration returns the value of the named prop as a `time.Duration`, representing +// a span of time. +// +// Units of measure are supported: ms, sec, min, hour, day, week, year. +// See config.UnitsToMillis for a complete list of units supported. +// +// If the property is not found then the supplied default `def` +// and `ErrNotFound` are returned. +// +// See config.String +func (config *Config) Duration(name string, def time.Duration) (val time.Duration, err error) { + var s string + if s, err = config.String(name, ""); err == nil { + var ms int64 + ms, err = timeconv.ParseMilliseconds(s) + val = time.Duration(ms) * time.Millisecond + } + if err != nil { + val = def + } + return +} + +// AddChangedListener adds a listener that will receive notifications +// whenever one or more property values change within the config. +func (config *Config) AddChangedListener(l ChangedListener) { + config.mutexListeners.Lock() + defer config.mutexListeners.Unlock() + + config.chgListeners = append(config.chgListeners, l) +} + +// RemoveChangedListener removes all instances of a ChangedListener. +// Returns `ErrNotFound` if the listener was not present. 
+func (config *Config) RemoveChangedListener(l ChangedListener) error { + config.mutexListeners.Lock() + defer config.mutexListeners.Unlock() + + dest := make([]ChangedListener, 0, len(config.chgListeners)) + err := ErrNotFound + + // Remove all instances of the listener by + // copying list while filtering. + for _, s := range config.chgListeners { + if s != l { + dest = append(dest, s) + } else { + err = nil + } + } + config.chgListeners = dest + return err +} + +// Shutdown can be called to stop monitoring of all config sources. +func (config *Config) Shutdown() { + config.mutexSrc.RLock() + defer config.mutexSrc.RUnlock() + if config.shutdown != nil { + close(config.shutdown) + } +} + +// onSourceChanged is called whenever one or more properties of a +// config source have changed. +func (config *Config) onSourceChanged(src SourceMonitored) { + defer func() { + if p := recover(); p != nil { + fmt.Println(p) + } + }() + config.mutexListeners.RLock() + defer config.mutexListeners.RUnlock() + for _, l := range config.chgListeners { + l.ConfigChanged(config, src) + } +} + +// monitor periodically checks a config source for changes. +func (config *Config) monitor(se *sourceEntry) { + go func(se *sourceEntry, shutdown <-chan interface{}) { + var src SourceMonitored + var ok bool + if src, ok = se.src.(SourceMonitored); !ok { + return + } + paused := false + last := time.Time{} + freq := src.GetMonitorFreq() + if freq <= 0 { + paused = true + freq = 10 * time.Second + last, _ = src.GetLastModified() + } + timer := time.NewTimer(freq) + for { + select { + case <-timer.C: + if !paused { + if latest, err := src.GetLastModified(); err != nil { + if config.ShouldPanicOnError() { + panic(fmt.Sprintf("error <%v> getting last modified for %v", err, src)) + } + } else { + if last.Before(latest) { + last = latest + config.reloadProps(se) + // TODO: calc diff and provide detailed changes + config.onSourceChanged(src) + } + } + } + freq = src.GetMonitorFreq() + if freq <= 0 { + paused = true + freq = 10 * time.Second + } else { + paused = false + } + timer.Reset(freq) + case <-shutdown: + // stop the timer and exit + if !timer.Stop() { + <-timer.C + } + return + } + } + }(se, config.shutdown) +} + +// reloadProps causes a Source to reload its properties.
+func (config *Config) reloadProps(se *sourceEntry) { + config.mutexSrc.Lock() + defer config.mutexSrc.Unlock() + + m, err := se.src.GetProps() + if err != nil { + if config.wantPanicOnError { + panic(fmt.Sprintf("GetProps error for %v", se.src)) + } + return + } + + se.props = make(map[string]string) + for k, v := range m { + se.props[k] = v + } +} diff --git a/vendor/github.com/wiggin77/cfg/go.mod b/vendor/github.com/wiggin77/cfg/go.mod new file mode 100644 index 0000000..2e5a038 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/go.mod @@ -0,0 +1,5 @@ +module github.com/wiggin77/cfg + +go 1.12 + +require github.com/wiggin77/merror v1.0.2 diff --git a/vendor/github.com/wiggin77/cfg/go.sum b/vendor/github.com/wiggin77/cfg/go.sum new file mode 100644 index 0000000..30fd3b5 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/go.sum @@ -0,0 +1,2 @@ +github.com/wiggin77/merror v1.0.2 h1:V0nH9eFp64ASyaXC+pB5WpvBoCg7NUwvaCSKdzlcHqw= +github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg= diff --git a/vendor/github.com/wiggin77/cfg/ini/ini.go b/vendor/github.com/wiggin77/cfg/ini/ini.go new file mode 100644 index 0000000..d28d744 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/ini/ini.go @@ -0,0 +1,167 @@ +package ini + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "sync" + "time" +) + +// Ini provides parsing and querying of INI format or simple name/value pairs +// such as a simple config file. +// A name/value pair format is just an INI with no sections, and properties can +// be queried using an empty section name. +type Ini struct { + mutex sync.RWMutex + m map[string]*Section + lm time.Time +} + +// LoadFromFilespec loads an INI file from string containing path and filename. +func (ini *Ini) LoadFromFilespec(filespec string) error { + f, err := os.Open(filespec) + if err != nil { + return err + } + return ini.LoadFromFile(f) +} + +// LoadFromFile loads an INI file from `os.File`. +func (ini *Ini) LoadFromFile(file *os.File) error { + + fi, err := file.Stat() + if err != nil { + return err + } + lm := fi.ModTime() + + if err := ini.LoadFromReader(file); err != nil { + return err + } + ini.lm = lm + return nil +} + +// LoadFromReader loads an INI file from an `io.Reader`. +func (ini *Ini) LoadFromReader(reader io.Reader) error { + data, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return ini.LoadFromString(string(data)) +} + +// LoadFromString parses an INI from a string . +func (ini *Ini) LoadFromString(s string) error { + m, err := getSections(s) + if err != nil { + return err + } + ini.mutex.Lock() + ini.m = m + ini.lm = time.Now() + ini.mutex.Unlock() + return nil +} + +// GetLastModified returns the last modified timestamp of the +// INI contents. +func (ini *Ini) GetLastModified() time.Time { + return ini.lm +} + +// GetSectionNames returns the names of all sections in this INI. +// Note, the returned section names are a snapshot in time, meaning +// other goroutines may change the contents of this INI as soon as +// the method returns. +func (ini *Ini) GetSectionNames() []string { + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + arr := make([]string, 0, len(ini.m)) + for key := range ini.m { + arr = append(arr, key) + } + return arr +} + +// GetKeys returns the names of all keys in the specified section. +// Note, the returned key names are a snapshot in time, meaning other +// goroutines may change the contents of this INI as soon as the +// method returns. 
+func (ini *Ini) GetKeys(sectionName string) ([]string, error) { + sec, err := ini.getSection(sectionName) + if err != nil { + return nil, err + } + return sec.getKeys(), nil +} + +// getSection returns the named section. +func (ini *Ini) getSection(sectionName string) (*Section, error) { + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + sec, ok := ini.m[sectionName] + if !ok { + return nil, fmt.Errorf("section '%s' not found", sectionName) + } + return sec, nil +} + +// GetFlattenedKeys returns all section names plus keys as one +// flattened array. +func (ini *Ini) GetFlattenedKeys() []string { + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + arr := make([]string, 0, len(ini.m)*2) + for _, section := range ini.m { + keys := section.getKeys() + for _, key := range keys { + name := section.GetName() + if name != "" { + key = name + "." + key + } + arr = append(arr, key) + } + } + return arr +} + +// GetProp returns the value of the specified key in the named section. +func (ini *Ini) GetProp(section string, key string) (val string, ok bool) { + sec, err := ini.getSection(section) + if err != nil { + return val, false + } + return sec.GetProp(key) +} + +// ToMap returns a flattened map of the section name plus keys mapped +// to values. +func (ini *Ini) ToMap() map[string]string { + m := make(map[string]string) + + ini.mutex.RLock() + defer ini.mutex.RUnlock() + + for _, section := range ini.m { + for _, key := range section.getKeys() { + val, ok := section.GetProp(key) + if ok { + name := section.GetName() + var mapkey string + if name != "" { + mapkey = name + "." + key + } else { + mapkey = key + } + m[mapkey] = val + } + } + } + return m +} diff --git a/vendor/github.com/wiggin77/cfg/ini/parser.go b/vendor/github.com/wiggin77/cfg/ini/parser.go new file mode 100644 index 0000000..2891640 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/ini/parser.go @@ -0,0 +1,142 @@ +package ini + +import ( + "fmt" + "strings" + + "github.com/wiggin77/merror" +) + +// LF is linefeed +const LF byte = 0x0A + +// CR is carriage return +const CR byte = 0x0D + +// getSections parses an INI formatted string, or string containing just name/value pairs, +// returns map of `Section`'s. +// +// Any name/value pairs appearing before a section name are added to the section named +// with an empty string (""). Also true for Linux-style config files where all props +// are outside a named section. +// +// Any errors encountered are aggregated and returned, along with the partially parsed +// sections. +func getSections(str string) (map[string]*Section, error) { + merr := merror.New() + mapSections := make(map[string]*Section) + lines := buildLineArray(str) + section := newSection("") + + for _, line := range lines { + name, ok := parseSection(line) + if ok { + // A section name encountered. Stop processing the current one. + // Don't add the current section to the map if the section name is blank + // and the prop map is empty. + nameCurr := section.GetName() + if nameCurr != "" || section.hasKeys() { + mapSections[nameCurr] = section + } + // Start processing a new section. + section = newSection(name) + } else { + // Parse the property and add to the current section, or ignore if comment. + if k, v, comment, err := parseProp(line); !comment && err == nil { + section.setProp(k, v) + } else if err != nil { + merr.Append(err) // aggregate errors + } + } + + } + // If the current section is not empty, add it. 
+ if section.hasKeys() { + mapSections[section.GetName()] = section + } + return mapSections, merr.ErrorOrNil() +} + +// buildLineArray parses the given string buffer and creates a list of strings, +// one for each line in the string buffer. +// +// A line is considered to be terminated by any one of a line feed ('\n'), +// a carriage return ('\r'), or a carriage return followed immediately by a +// linefeed. +// +// Lines prefixed with ';' or '#' are considered comments and skipped. +func buildLineArray(str string) []string { + arr := make([]string, 0, 10) + str = str + "\n" + + iLen := len(str) + iPos, iBegin := 0, 0 + var ch byte + + for iPos < iLen { + ch = str[iPos] + if ch == LF || ch == CR { + sub := str[iBegin:iPos] + sub = strings.TrimSpace(sub) + if sub != "" && !strings.HasPrefix(sub, ";") && !strings.HasPrefix(sub, "#") { + arr = append(arr, sub) + } + iPos++ + if ch == CR && iPos < iLen && str[iPos] == LF { + iPos++ + } + iBegin = iPos + } else { + iPos++ + } + } + return arr +} + +// parseSection parses the specified string for a section name enclosed in square brackets. +// Returns the section name found, or `ok=false` if `str` is not a section header. +func parseSection(str string) (name string, ok bool) { + str = strings.TrimSpace(str) + if !strings.HasPrefix(str, "[") { + return "", false + } + iCloser := strings.Index(str, "]") + if iCloser == -1 { + return "", false + } + return strings.TrimSpace(str[1:iCloser]), true +} + +// parseProp parses the specified string and extracts a key/value pair. +// +// If the string is a comment (prefixed with ';' or '#') then `comment=true` +// and key will be empty. +func parseProp(str string) (key string, val string, comment bool, err error) { + iLen := len(str) + iEqPos := strings.Index(str, "=") + if iEqPos == -1 { + return "", "", false, fmt.Errorf("not a key/value pair:'%s'", str) + } + + key = str[0:iEqPos] + key = strings.TrimSpace(key) + if iEqPos+1 < iLen { + val = str[iEqPos+1:] + val = strings.TrimSpace(val) + } + + // Check that the key has at least 1 char. + if key == "" { + return "", "", false, fmt.Errorf("key is empty for '%s'", str) + } + + // Check if this line is a comment that just happens + // to have an equals sign in it. Not an error, but not a + // useable line either. + if strings.HasPrefix(key, ";") || strings.HasPrefix(key, "#") { + key = "" + val = "" + comment = true + } + return key, val, comment, err +} diff --git a/vendor/github.com/wiggin77/cfg/ini/section.go b/vendor/github.com/wiggin77/cfg/ini/section.go new file mode 100644 index 0000000..18c4c25 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/ini/section.go @@ -0,0 +1,109 @@ +package ini + +import ( + "fmt" + "strings" + "sync" +) + +// Section represents a section in an INI file. The section has a name, which is +// enclosed in square brackets in the file. The section also has an array of +// key/value pairs. +type Section struct { + name string + props map[string]string + mtx sync.RWMutex +} + +func newSection(name string) *Section { + sec := &Section{} + sec.name = name + sec.props = make(map[string]string) + return sec +} + +// addLines addes an array of strings containing name/value pairs +// of the format `key=value`. +//func addLines(lines []string) { +// TODO +//} + +// GetName returns the name of the section. +func (sec *Section) GetName() (name string) { + sec.mtx.RLock() + name = sec.name + sec.mtx.RUnlock() + return +} + +// GetProp returns the value associated with the given key, or +// `ok=false` if key does not exist. 
+func (sec *Section) GetProp(key string) (val string, ok bool) { + sec.mtx.RLock() + val, ok = sec.props[key] + sec.mtx.RUnlock() + return +} + +// SetProp sets the value associated with the given key. +func (sec *Section) setProp(key string, val string) { + sec.mtx.Lock() + sec.props[key] = val + sec.mtx.Unlock() +} + +// hasKeys returns true if there are one or more properties in +// this section. +func (sec *Section) hasKeys() (b bool) { + sec.mtx.RLock() + b = len(sec.props) > 0 + sec.mtx.RUnlock() + return +} + +// getKeys returns an array containing all keys in this section. +func (sec *Section) getKeys() []string { + sec.mtx.RLock() + defer sec.mtx.RUnlock() + + arr := make([]string, len(sec.props)) + idx := 0 + for k := range sec.props { + arr[idx] = k + idx++ + } + return arr +} + +// combine the given section with this one. +func (sec *Section) combine(sec2 *Section) { + sec.mtx.Lock() + sec2.mtx.RLock() + defer sec.mtx.Unlock() + defer sec2.mtx.RUnlock() + + for k, v := range sec2.props { + sec.props[k] = v + } +} + +// String returns a string representation of this section. +func (sec *Section) String() string { + return fmt.Sprintf("[%s]\n%s", sec.GetName(), sec.StringPropsOnly()) +} + +// StringPropsOnly returns a string representation of this section +// without the section header. +func (sec *Section) StringPropsOnly() string { + sec.mtx.RLock() + defer sec.mtx.RUnlock() + sb := &strings.Builder{} + + for k, v := range sec.props { + sb.WriteString(k) + sb.WriteString("=") + sb.WriteString(v) + sb.WriteString("\n") + } + return sb.String() +} diff --git a/vendor/github.com/wiggin77/cfg/listener.go b/vendor/github.com/wiggin77/cfg/listener.go new file mode 100644 index 0000000..12ea4e4 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/listener.go @@ -0,0 +1,11 @@ +package cfg + +// ChangedListener interface is for receiving notifications +// when one or more properties within monitored config sources +// (SourceMonitored) have changed values. +type ChangedListener interface { + + // Changed is called when one or more properties in a `SourceMonitored` has a + // changed value. + ConfigChanged(cfg *Config, src SourceMonitored) +} diff --git a/vendor/github.com/wiggin77/cfg/nocopy.go b/vendor/github.com/wiggin77/cfg/nocopy.go new file mode 100644 index 0000000..f2450c0 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/nocopy.go @@ -0,0 +1,11 @@ +package cfg + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} diff --git a/vendor/github.com/wiggin77/cfg/source.go b/vendor/github.com/wiggin77/cfg/source.go new file mode 100644 index 0000000..09083e9 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/source.go @@ -0,0 +1,58 @@ +package cfg + +import ( + "sync" + "time" +) + +// Source is the interface required for any source of name/value pairs. +type Source interface { + + // GetProps fetches all the properties from a source and returns + // them as a map. + GetProps() (map[string]string, error) +} + +// SourceMonitored is the interface required for any config source that is +// monitored for changes. +type SourceMonitored interface { + Source + + // GetLastModified returns the time of the latest modification to any + // property value within the source. 
If a source does not support + // modifying properties at runtime then the zero value for `Time` + // should be returned to ensure reload events are not generated. + GetLastModified() (time.Time, error) + + // GetMonitorFreq returns the frequency as a `time.Duration` between + // checks for changes to this config source. + // + // Returning zero (or less) will temporarily suspend calls to `GetLastModified` + // and `GetMonitorFreq` will be called every 10 seconds until resumed, after which + // `GetMontitorFreq` will be called at a frequency roughly equal to the `time.Duration` + // returned. + GetMonitorFreq() time.Duration +} + +// AbstractSourceMonitor can be embedded in a custom `Source` to provide the +// basic plumbing for monitor frequency. +type AbstractSourceMonitor struct { + mutex sync.RWMutex + freq time.Duration +} + +// GetMonitorFreq returns the frequency as a `time.Duration` between +// checks for changes to this config source. +func (asm *AbstractSourceMonitor) GetMonitorFreq() (freq time.Duration) { + asm.mutex.RLock() + freq = asm.freq + asm.mutex.RUnlock() + return +} + +// SetMonitorFreq sets the frequency between checks for changes to this config source. +func (asm *AbstractSourceMonitor) SetMonitorFreq(freq time.Duration) { + asm.mutex.Lock() + asm.freq = freq + asm.mutex.Unlock() +} diff --git a/vendor/github.com/wiggin77/cfg/srcfile.go b/vendor/github.com/wiggin77/cfg/srcfile.go new file mode 100644 index 0000000..f42c69f --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/srcfile.go @@ -0,0 +1,63 @@ +package cfg + +import ( + "os" + "time" + + "github.com/wiggin77/cfg/ini" +) + +// SrcFile is a configuration `Source` backed by a file containing +// name/value pairs or INI format. +type SrcFile struct { + AbstractSourceMonitor + ini ini.Ini + file *os.File +} + +// NewSrcFileFromFilespec creates a new SrcFile with the specified filespec. +func NewSrcFileFromFilespec(filespec string) (*SrcFile, error) { + file, err := os.Open(filespec) + if err != nil { + return nil, err + } + return NewSrcFile(file) +} + +// NewSrcFile creates a new SrcFile with the specified os.File. +func NewSrcFile(file *os.File) (*SrcFile, error) { + sf := &SrcFile{} + sf.freq = time.Minute + sf.file = file + if err := sf.ini.LoadFromFile(file); err != nil { + return nil, err + } + return sf, nil +} + +// GetProps fetches all the properties from a source and returns +// them as a map. +func (sf *SrcFile) GetProps() (map[string]string, error) { + lm, err := sf.GetLastModified() + if err != nil { + return nil, err + } + + // Check if we need to reload. + if sf.ini.GetLastModified() != lm { + if err := sf.ini.LoadFromFile(sf.file); err != nil { + return nil, err + } + } + return sf.ini.ToMap(), nil +} + +// GetLastModified returns the time of the latest modification to any +// property value within the source. +func (sf *SrcFile) GetLastModified() (time.Time, error) { + fi, err := sf.file.Stat() + if err != nil { + return time.Now(), err + } + return fi.ModTime(), nil +} diff --git a/vendor/github.com/wiggin77/cfg/srcmap.go b/vendor/github.com/wiggin77/cfg/srcmap.go new file mode 100644 index 0000000..321db27 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/srcmap.go @@ -0,0 +1,78 @@ +package cfg + +import ( + "time" +) + +// SrcMap is a configuration `Source` backed by a simple map. +type SrcMap struct { + AbstractSourceMonitor + m map[string]string + lm time.Time +} + +// NewSrcMap creates an empty `SrcMap`. 
+func NewSrcMap() *SrcMap { + sm := &SrcMap{} + sm.m = make(map[string]string) + sm.lm = time.Now() + sm.freq = time.Minute + return sm +} + +// NewSrcMapFromMap creates a `SrcMap` containing a copy of the +// specified map. +func NewSrcMapFromMap(mapIn map[string]string) *SrcMap { + sm := NewSrcMap() + sm.PutAll(mapIn) + return sm +} + +// Put inserts or updates a value in the `SrcMap`. +func (sm *SrcMap) Put(key string, val string) { + sm.mutex.Lock() + sm.m[key] = val + sm.lm = time.Now() + sm.mutex.Unlock() +} + +// PutAll inserts a copy of `mapIn` into the `SrcMap` +func (sm *SrcMap) PutAll(mapIn map[string]string) { + sm.mutex.Lock() + defer sm.mutex.Unlock() + + for k, v := range mapIn { + sm.m[k] = v + } + sm.lm = time.Now() +} + +// GetProps fetches all the properties from a source and returns +// them as a map. +func (sm *SrcMap) GetProps() (m map[string]string, err error) { + sm.mutex.RLock() + m = sm.m + sm.mutex.RUnlock() + return +} + +// GetLastModified returns the time of the latest modification to any +// property value within the source. If a source does not support +// modifying properties at runtime then the zero value for `Time` +// should be returned to ensure reload events are not generated. +func (sm *SrcMap) GetLastModified() (last time.Time, err error) { + sm.mutex.RLock() + last = sm.lm + sm.mutex.RUnlock() + return +} + +// GetMonitorFreq returns the frequency as a `time.Duration` between +// checks for changes to this config source. Defaults to 1 minute +// unless changed with `SetMonitorFreq`. +func (sm *SrcMap) GetMonitorFreq() (freq time.Duration) { + sm.mutex.RLock() + freq = sm.freq + sm.mutex.RUnlock() + return +} diff --git a/vendor/github.com/wiggin77/cfg/timeconv/parse.go b/vendor/github.com/wiggin77/cfg/timeconv/parse.go new file mode 100644 index 0000000..218ef43 --- /dev/null +++ b/vendor/github.com/wiggin77/cfg/timeconv/parse.go @@ -0,0 +1,108 @@ +package timeconv + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" +) + +// MillisPerSecond is the number of millseconds per second. +const MillisPerSecond int64 = 1000 + +// MillisPerMinute is the number of millseconds per minute. +const MillisPerMinute int64 = MillisPerSecond * 60 + +// MillisPerHour is the number of millseconds per hour. +const MillisPerHour int64 = MillisPerMinute * 60 + +// MillisPerDay is the number of millseconds per day. +const MillisPerDay int64 = MillisPerHour * 24 + +// MillisPerWeek is the number of millseconds per week. +const MillisPerWeek int64 = MillisPerDay * 7 + +// MillisPerYear is the approximate number of millseconds per year. +const MillisPerYear int64 = MillisPerDay*365 + int64((float64(MillisPerDay) * 0.25)) + +// ParseMilliseconds parses a string containing a number plus +// a unit of measure for time and returns the number of milliseconds +// it represents. +// +// Example: +// * "1 second" returns 1000 +// * "1 minute" returns 60000 +// * "1 hour" returns 3600000 +// +// See config.UnitsToMillis for a list of supported units of measure. 
+func ParseMilliseconds(str string) (int64, error) { + s := strings.TrimSpace(str) + reg := regexp.MustCompile("([0-9\\.\\-+]*)(.*)") + matches := reg.FindStringSubmatch(s) + if matches == nil || len(matches) < 1 || matches[1] == "" { + return 0, fmt.Errorf("invalid syntax - '%s'", s) + } + digits := matches[1] + units := "ms" + if len(matches) > 1 && matches[2] != "" { + units = matches[2] + } + + fDigits, err := strconv.ParseFloat(digits, 64) + if err != nil { + return 0, err + } + + msPerUnit, err := UnitsToMillis(units) + if err != nil { + return 0, err + } + + // Check for overflow. + fms := float64(msPerUnit) * fDigits + if fms > math.MaxInt64 || fms < math.MinInt64 { + return 0, fmt.Errorf("out of range - '%s' overflows", s) + } + ms := int64(fms) + return ms, nil +} + +// UnitsToMillis returns the number of milliseconds represented by the specified unit of measure. +// +// Example: +// * "second" returns 1000
+// * "minute" returns 60000
+// * "hour" returns 3600000
    +// +// Supported units of measure: +// * "milliseconds", "millis", "ms", "millisecond" +// * "seconds", "sec", "s", "second" +// * "minutes", "mins", "min", "m", "minute" +// * "hours", "h", "hour" +// * "days", "d", "day" +// * "weeks", "w", "week" +// * "years", "y", "year" +func UnitsToMillis(units string) (ms int64, err error) { + u := strings.TrimSpace(units) + u = strings.ToLower(u) + switch u { + case "milliseconds", "millisecond", "millis", "ms": + ms = 1 + case "seconds", "second", "sec", "s": + ms = MillisPerSecond + case "minutes", "minute", "mins", "min", "m": + ms = MillisPerMinute + case "hours", "hour", "h": + ms = MillisPerHour + case "days", "day", "d": + ms = MillisPerDay + case "weeks", "week", "w": + ms = MillisPerWeek + case "years", "year", "y": + ms = MillisPerYear + default: + err = fmt.Errorf("invalid syntax - '%s' not a supported unit of measure", u) + } + return +} diff --git a/vendor/github.com/wiggin77/merror/.gitignore b/vendor/github.com/wiggin77/merror/.gitignore new file mode 100644 index 0000000..f1c181e --- /dev/null +++ b/vendor/github.com/wiggin77/merror/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/wiggin77/merror/.travis.yml b/vendor/github.com/wiggin77/merror/.travis.yml new file mode 100644 index 0000000..9899b38 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +before_script: + - go vet ./... + \ No newline at end of file diff --git a/vendor/github.com/wiggin77/merror/LICENSE b/vendor/github.com/wiggin77/merror/LICENSE new file mode 100644 index 0000000..2b0bf7e --- /dev/null +++ b/vendor/github.com/wiggin77/merror/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 wiggin77 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/wiggin77/merror/README.md b/vendor/github.com/wiggin77/merror/README.md new file mode 100644 index 0000000..964c7ac --- /dev/null +++ b/vendor/github.com/wiggin77/merror/README.md @@ -0,0 +1,6 @@ +# merror + +[![GoDoc](https://godoc.org/github.com/wiggin77/merror?status.svg)](https://godoc.org/github.com/wiggin77/merror) +[![Build Status](https://travis-ci.org/wiggin77/merror.svg?branch=master)](https://travis-ci.org/wiggin77/merror) + +Multiple Error aggregator for Go. 
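Editor's note: since the README above is terse, here is a short, hypothetical usage sketch (an editorial illustration, not taken from the upstream repository) showing how failures can be accumulated with the `merror` API vendored below and collapsed to `nil` when nothing went wrong; the `validate` helper and its sample inputs are made up for the example.

```Go
package main

import (
	"fmt"

	"github.com/wiggin77/merror"
)

// validate runs several checks and aggregates every failure instead of
// stopping at the first one; it returns nil when all checks pass.
func validate(values []string) error {
	merr := merror.New()
	for i, v := range values {
		if v == "" {
			merr.Append(fmt.Errorf("value %d is empty", i))
		}
	}
	return merr.ErrorOrNil() // nil when nothing was appended
}

func main() {
	if err := validate([]string{"a", "", ""}); err != nil {
		fmt.Println(err) // rendered by the default (or a custom) formatter
	}
}
```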
diff --git a/vendor/github.com/wiggin77/merror/format.go b/vendor/github.com/wiggin77/merror/format.go new file mode 100644 index 0000000..8ba9aa8 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/format.go @@ -0,0 +1,43 @@ +package merror + +import ( + "fmt" + "strings" +) + +// FormatterFunc is a function that converts a merror +// to a string. +type FormatterFunc func(merr *MError) string + +// GlobalFormatter is the global merror formatter. +// Set this to a custom formatter if desired. +var GlobalFormatter = defaultFormatter + +// defaultFormatter +func defaultFormatter(merr *MError) string { + count := 0 + overflow := 0 + + var format func(sb *strings.Builder, merr *MError, indent string) + format = func(sb *strings.Builder, merr *MError, indent string) { + count += merr.Len() + overflow += merr.Overflow() + + fmt.Fprintf(sb, "%sMError:\n", indent) + for _, err := range merr.Errors() { + if e, ok := err.(*MError); ok { + format(sb, e, indent+" ") + } else { + fmt.Fprintf(sb, "%s%s\n", indent, err.Error()) + } + } + } + + sb := &strings.Builder{} + format(sb, merr, "") + fmt.Fprintf(sb, "%d errors total.\n", count) + if merr.overflow > 0 { + fmt.Fprintf(sb, "%d errors truncated.\n", overflow) + } + return sb.String() +} diff --git a/vendor/github.com/wiggin77/merror/go.mod b/vendor/github.com/wiggin77/merror/go.mod new file mode 100644 index 0000000..ba207ac --- /dev/null +++ b/vendor/github.com/wiggin77/merror/go.mod @@ -0,0 +1,5 @@ +module github.com/wiggin77/merror + +go 1.15 + +require github.com/stretchr/testify v1.6.1 diff --git a/vendor/github.com/wiggin77/merror/go.sum b/vendor/github.com/wiggin77/merror/go.sum new file mode 100644 index 0000000..56d62e7 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/wiggin77/merror/merror.go b/vendor/github.com/wiggin77/merror/merror.go new file mode 100644 index 0000000..def7535 --- /dev/null +++ b/vendor/github.com/wiggin77/merror/merror.go @@ -0,0 +1,120 @@ +package merror + +import "sync" + +// MError represents zero or more errors that can be +// accumulated via the `Append` method. +type MError struct { + cap int + + mux sync.RWMutex + errors []error + overflow int + formatter FormatterFunc +} + +// New returns a new instance of `MError` with no limit on the +// number of errors that can be appended. +func New() *MError { + me := &MError{} + me.errors = make([]error, 0, 10) + return me +} + +// NewWithCap returns a new instance of `MError` with a maximum +// capacity of `cap` errors. If exceeded only the overflow counter +// will be incremented. 
+// +// A `cap` of zero of less means no cap and max size of a slice +// on the current platform is the upper bound. +func NewWithCap(cap int) *MError { + me := New() + me.cap = cap + return me +} + +// Append adds an error to the aggregated error list. +func (me *MError) Append(err error) { + if err == nil { + return + } + + me.mux.Lock() + defer me.mux.Unlock() + + if me.cap > 0 && len(me.errors) >= me.cap { + me.overflow++ + } else { + me.errors = append(me.errors, err) + } +} + +// Errors returns a slice of the `error` instances that have been +// appended to this `MError`. +func (me *MError) Errors() []error { + me.mux.RLock() + defer me.mux.RUnlock() + + errs := make([]error, len(me.errors)) + copy(errs, me.errors) + + return errs +} + +// Len returns the number of errors that have been appended. +func (me *MError) Len() int { + me.mux.RLock() + defer me.mux.RUnlock() + + return len(me.errors) +} + +// Overflow returns the number of errors that have been truncated +// because maximum capacity was exceeded. +func (me *MError) Overflow() int { + me.mux.RLock() + defer me.mux.RUnlock() + + return me.overflow +} + +// SetFormatter sets the `FormatterFunc` to be used when `Error` is +// called. The previous `FormatterFunc` is returned. +func (me *MError) SetFormatter(f FormatterFunc) (old FormatterFunc) { + me.mux.Lock() + defer me.mux.Unlock() + + old = me.formatter + me.formatter = f + return +} + +// ErrorOrNil returns nil if this `MError` contains no errors, +// otherwise this `MError` is returned. +func (me *MError) ErrorOrNil() error { + if me == nil { + return nil + } + + me.mux.RLock() + defer me.mux.RUnlock() + + if len(me.errors) == 0 { + return nil + } + return me +} + +// Error returns a string representation of this MError. +// The output format depends on the `Formatter` set for this +// merror instance, or the global formatter if none set. +func (me *MError) Error() string { + me.mux.RLock() + defer me.mux.RUnlock() + + f := me.formatter + if f == nil { + f = GlobalFormatter + } + return f(me) +} diff --git a/vendor/github.com/wiggin77/srslog/.gitignore b/vendor/github.com/wiggin77/srslog/.gitignore new file mode 100644 index 0000000..ebf0f2e --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/.gitignore @@ -0,0 +1 @@ +.cover diff --git a/vendor/github.com/wiggin77/srslog/.travis.yml b/vendor/github.com/wiggin77/srslog/.travis.yml new file mode 100644 index 0000000..921150e --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/.travis.yml @@ -0,0 +1,15 @@ +sudo: required +dist: trusty +group: edge +language: go +go: +- 1.5 +before_install: + - pip install --user codecov +script: +- | + go get ./... + go test -v -coverprofile=coverage.txt -covermode=atomic + go vet +after_success: + - codecov diff --git a/vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md b/vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..18ac49f --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/CODE_OF_CONDUCT.md @@ -0,0 +1,50 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. 
+ +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer at [sirsean@gmail.com]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. + + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/github.com/wiggin77/srslog/LICENSE b/vendor/github.com/wiggin77/srslog/LICENSE new file mode 100644 index 0000000..9269338 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/wiggin77/srslog/README.md b/vendor/github.com/wiggin77/srslog/README.md new file mode 100644 index 0000000..dcacc34 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/README.md @@ -0,0 +1,147 @@ +[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) + +# srslog + +Go has a `syslog` package in the standard library, but it has the following +shortcomings: + +1. It doesn't have TLS support +2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) + +I agree that it doesn't need to be in the standard library. So, I've +followed Brad's suggestion and have made a separate project to handle syslog. + +This code was taken directly from the Go project as a base to start from. + +However, this _does_ have TLS support. + +# Usage + +Basic usage retains the same interface as the original `syslog` package. We +only added to the interface where required to support new functionality. + +Switch from the standard library: + +``` +import( + //"log/syslog" + syslog "github.com/RackSec/srslog" +) +``` + +You can still use it for local syslog: + +``` +w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted UDP: + +``` +w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted TCP: + +``` +w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") +``` + +But now you can also send messages via TLS-encrypted TCP: + +``` +w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") +``` + +And if you need more control over your TLS configuration : + +``` +pool := x509.NewCertPool() +serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") +if err != nil { + return nil, err +} +pool.AppendCertsFromPEM(serverCert) +config := tls.Config{ + RootCAs: pool, +} + +w, err := DialWithTLSConfig(network, raddr, priority, tag, &config) +``` + +(Note that in both TLS cases, this uses a self-signed certificate, where the +remote syslog server has the keypair and the client has only the public key.) + +And then to write log messages, continue like so: + +``` +if err != nil { + log.Fatal("failed to connect to syslog:", err) +} +defer w.Close() + +w.Alert("this is an alert") +w.Crit("this is critical") +w.Err("this is an error") +w.Warning("this is a warning") +w.Notice("this is a notice") +w.Info("this is info") +w.Debug("this is debug") +w.Write([]byte("these are some bytes")) +``` + +If you need further control over connection attempts, you can use the DialWithCustomDialer +function. 
To continue with the DialWithTLSConfig example: + +``` +netDialer := &net.Dialer{Timeout: time.Second*5} // easy timeouts +realNetwork := "tcp" // real network, other vars your dail func can close over +dial := func(network, addr string) (net.Conn, error) { + // cannot use "network" here as it'll simply be "custom" which will fail + return tls.DialWithDialer(netDialer, realNetwork, addr, &config) +} + +w, err := DialWithCustomDialer("custom", "192.168.0.52:514", syslog.LOG_ERR, "testtag", dial) +``` + +Your custom dial func can set timeouts, proxy connections, and do whatever else it needs before returning a net.Conn. + +# Generating TLS Certificates + +We've provided a script that you can use to generate a self-signed keypair: + +``` +pip install cryptography +python script/gen-certs.py +``` + +That outputs the public key and private key to standard out. Put those into +`.pem` files. (And don't put them into any source control. The certificate in +the `test` directory is used by the unit tests, and please do not actually use +it anywhere else.) + +# Running Tests + +Run the tests as usual: + +``` +go test +``` + +But we've also provided a test coverage script that will show you which +lines of code are not covered: + +``` +script/coverage --html +``` + +That will open a new browser tab showing coverage information. + +# License + +This project uses the New BSD License, the same as the Go project itself. + +# Code of Conduct + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. diff --git a/vendor/github.com/wiggin77/srslog/constants.go b/vendor/github.com/wiggin77/srslog/constants.go new file mode 100644 index 0000000..600801e --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/constants.go @@ -0,0 +1,68 @@ +package srslog + +import ( + "errors" +) + +// Priority is a combination of the syslog facility and +// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity +// message from the FTP facility. The default severity is LOG_EMERG; +// the default facility is LOG_KERN. +type Priority int + +const severityMask = 0x07 +const facilityMask = 0xf8 + +const ( + // Severity. + + // From /usr/include/sys/syslog.h. + // These are the same on Linux, BSD, and OS X. + LOG_EMERG Priority = iota + LOG_ALERT + LOG_CRIT + LOG_ERR + LOG_WARNING + LOG_NOTICE + LOG_INFO + LOG_DEBUG +) + +const ( + // Facility. + + // From /usr/include/sys/syslog.h. + // These are the same up to LOG_FTP on Linux, BSD, and OS X. + LOG_KERN Priority = iota << 3 + LOG_USER + LOG_MAIL + LOG_DAEMON + LOG_AUTH + LOG_SYSLOG + LOG_LPR + LOG_NEWS + LOG_UUCP + LOG_CRON + LOG_AUTHPRIV + LOG_FTP + _ // unused + _ // unused + _ // unused + _ // unused + LOG_LOCAL0 + LOG_LOCAL1 + LOG_LOCAL2 + LOG_LOCAL3 + LOG_LOCAL4 + LOG_LOCAL5 + LOG_LOCAL6 + LOG_LOCAL7 +) + +func validatePriority(p Priority) error { + if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { + return errors.New("log/syslog: invalid priority") + } else { + return nil + } +} diff --git a/vendor/github.com/wiggin77/srslog/dialer.go b/vendor/github.com/wiggin77/srslog/dialer.go new file mode 100644 index 0000000..1ecf29b --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/dialer.go @@ -0,0 +1,104 @@ +package srslog + +import ( + "crypto/tls" + "net" +) + +// dialerFunctionWrapper is a simple object that consists of a dialer function +// and its name. This is primarily for testing, so we can make sure that the +// getDialer method returns the correct dialer function. 
However, if you ever +// find that you need to check which dialer function you have, this would also +// be useful for you without having to use reflection. +type dialerFunctionWrapper struct { + Name string + Dialer func() (serverConn, string, error) +} + +// Call the wrapped dialer function and return its return values. +func (df dialerFunctionWrapper) Call() (serverConn, string, error) { + return df.Dialer() +} + +// getDialer returns a "dialer" function that can be called to connect to a +// syslog server. +// +// Each dialer function is responsible for dialing the remote host and returns +// a serverConn, the hostname (or a default if the Writer has not specified a +// hostname), and an error in case dialing fails. +// +// The reason for separate dialers is that different network types may need +// to dial their connection differently, yet still provide a net.Conn interface +// that you can use once they have dialed. Rather than an increasingly long +// conditional, we have a map of network -> dialer function (with a sane default +// value), and adding a new network type is as easy as writing the dialer +// function and adding it to the map. +func (w *Writer) getDialer() dialerFunctionWrapper { + dialers := map[string]dialerFunctionWrapper{ + "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, + "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, + "custom": dialerFunctionWrapper{"customDialer", w.customDialer}, + } + dialer, ok := dialers[w.network] + if !ok { + dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} + } + return dialer +} + +// unixDialer uses the unixSyslog method to open a connection to the syslog +// daemon running on the local machine. +func (w *Writer) unixDialer() (serverConn, string, error) { + sc, err := unixSyslog() + hostname := w.hostname + if hostname == "" { + hostname = "localhost" + } + return sc, hostname, err +} + +// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network +// type. +func (w *Writer) tlsDialer() (serverConn, string, error) { + c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = newNetConn(c) + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// basicDialer is the most common dialer for syslog, and supports both TCP and +// UDP connections. +func (w *Writer) basicDialer() (serverConn, string, error) { + c, err := net.Dial(w.network, w.raddr) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = newNetConn(c) + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// customDialer uses the custom dialer when the Writer was created +// giving developers total control over how connections are made and returned. +// Note it does not check if cdialer is nil, as it should only be referenced from getDialer. 
+func (w *Writer) customDialer() (serverConn, string, error) {
+	c, err := w.customDial(w.network, w.raddr)
+	var sc serverConn
+	hostname := w.hostname
+	if err == nil {
+		sc = newNetConn(c)
+		if hostname == "" {
+			hostname = c.LocalAddr().String()
+		}
+	}
+	return sc, hostname, err
+}
diff --git a/vendor/github.com/wiggin77/srslog/formatter.go b/vendor/github.com/wiggin77/srslog/formatter.go
new file mode 100644
index 0000000..e306fd6
--- /dev/null
+++ b/vendor/github.com/wiggin77/srslog/formatter.go
@@ -0,0 +1,58 @@
+package srslog
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+const appNameMaxLength = 48 // limit to 48 chars as per RFC5424
+
+// Formatter is a type of function that takes the constituent parts of a
+// syslog message and returns a formatted string. A different Formatter is
+// defined for each different syslog protocol we support.
+type Formatter func(p Priority, hostname, tag, content string) string
+
+// DefaultFormatter is the original format supported by the Go syslog package,
+// and is a non-compliant amalgamation of 3164 and 5424 that is intended to
+// maximize compatibility.
+func DefaultFormatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
+		p, timestamp, hostname, tag, os.Getpid(), content)
+	return msg
+}
+
+// UnixFormatter omits the hostname, because it is only used locally.
+func UnixFormatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.Stamp)
+	msg := fmt.Sprintf("<%d>%s %s[%d]: %s",
+		p, timestamp, tag, os.Getpid(), content)
+	return msg
+}
+
+// RFC3164Formatter provides an RFC 3164 compliant message.
+func RFC3164Formatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.Stamp)
+	msg := fmt.Sprintf("<%d>%s %s %s[%d]: %s",
+		p, timestamp, hostname, tag, os.Getpid(), content)
+	return msg
+}
+
+// truncateStartStr returns the last max characters of s if s is longer than max.
+func truncateStartStr(s string, max int) string {
+	if len(s) > max {
+		return s[len(s)-max:]
+	}
+	return s
+}
+
+// RFC5424Formatter provides an RFC 5424 compliant message.
+func RFC5424Formatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	pid := os.Getpid()
+	appName := truncateStartStr(os.Args[0], appNameMaxLength)
+	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s",
+		p, 1, timestamp, hostname, appName, pid, tag, content)
+	return msg
+}
diff --git a/vendor/github.com/wiggin77/srslog/framer.go b/vendor/github.com/wiggin77/srslog/framer.go
new file mode 100644
index 0000000..ab46f0d
--- /dev/null
+++ b/vendor/github.com/wiggin77/srslog/framer.go
@@ -0,0 +1,24 @@
+package srslog
+
+import (
+	"fmt"
+)
+
+// Framer is a type of function that takes an input string (typically an
+// already-formatted syslog message) and applies "message framing" to it. We
+// have different framers because different versions of the syslog protocol
+// and its transport requirements define different framing behavior.
+type Framer func(in string) string
+
+// DefaultFramer does nothing, since there is no framing to apply. This is
+// the original behavior of the Go syslog package, and is also typically used
+// for UDP syslog.
+func DefaultFramer(in string) string {
+	return in
+}
+
+// RFC5425MessageLengthFramer prepends the message length to the front of the
+// provided message, as defined in RFC 5425.
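+// For example, an input of "abc" is framed as "3 abc".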
+func RFC5425MessageLengthFramer(in string) string { + return fmt.Sprintf("%d %s", len(in), in) +} diff --git a/vendor/github.com/wiggin77/srslog/go.mod b/vendor/github.com/wiggin77/srslog/go.mod new file mode 100644 index 0000000..393b076 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/go.mod @@ -0,0 +1,3 @@ +module github.com/wiggin77/srslog + +go 1.14 diff --git a/vendor/github.com/wiggin77/srslog/logger.go b/vendor/github.com/wiggin77/srslog/logger.go new file mode 100644 index 0000000..3a73856 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/logger.go @@ -0,0 +1,13 @@ +package srslog + +import ( + "io/ioutil" + "log" +) + +var Logger log.Logger + +func init() { + Logger = log.Logger{} + Logger.SetOutput(ioutil.Discard) +} diff --git a/vendor/github.com/wiggin77/srslog/net_conn.go b/vendor/github.com/wiggin77/srslog/net_conn.go new file mode 100644 index 0000000..f3cfeb6 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/net_conn.go @@ -0,0 +1,76 @@ +package srslog + +import ( + "io" + "net" + "time" +) + +// netConn has an internal net.Conn and adheres to the serverConn interface, +// allowing us to send syslog messages over the network. +type netConn struct { + conn net.Conn + done chan interface{} +} + +// newNetConn creates a netConn instance that is monitored for unexpected socket closure. +func newNetConn(conn net.Conn) *netConn { + nc := &netConn{conn: conn, done: make(chan interface{})} + go monitor(nc.conn, nc.done) + return nc +} + +// writeString formats syslog messages using time.RFC3339 and includes the +// hostname, and sends the message to the connection. +func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = DefaultFormatter + } + formattedMessage := framer(formatter(p, hostname, tag, msg)) + _, err := n.conn.Write([]byte(formattedMessage)) + return err +} + +// close the network connection +func (n *netConn) close() error { + // signal monitor goroutine to exit + close(n.done) + // wake up monitor blocked on read (close usually is enough) + _ = n.conn.SetReadDeadline(time.Now()) + // close the connection + return n.conn.Close() +} + +// monitor continuously tries to read from the connection to detect socket close. +// This is needed because syslog server uses a write only socket and Linux systems +// take a long time to detect a loss of connectivity on a socket when only writing; +// the writes simply fail without an error returned. +func monitor(conn net.Conn, done chan interface{}) { + defer Logger.Println("monitor exit") + + buf := make([]byte, 1) + for { + Logger.Println("monitor loop") + + select { + case <-done: + return + case <-time.After(1 * time.Second): + } + + err := conn.SetReadDeadline(time.Now().Add(time.Second * 30)) + if err != nil { + continue + } + + _, err = conn.Read(buf) + Logger.Println("monitor -- ", err) + if err == io.EOF { + Logger.Println("monitor close conn") + conn.Close() + } + } +} diff --git a/vendor/github.com/wiggin77/srslog/srslog.go b/vendor/github.com/wiggin77/srslog/srslog.go new file mode 100644 index 0000000..b47ad72 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/srslog.go @@ -0,0 +1,125 @@ +package srslog + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "io/ioutil" + "log" + "net" + "os" +) + +// This interface allows us to work with both local and network connections, +// and enables Solaris support (see syslog_unix.go). 
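+// Both netConn (net_conn.go) and localConn (srslog_unix.go) implement it.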
+type serverConn interface {
+	writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error
+	close() error
+}
+
+// DialFunc is the function signature to be used for a custom dialer callback
+// with DialWithCustomDialer.
+type DialFunc func(string, string) (net.Conn, error)
+
+// New establishes a new connection to the system log daemon. Each
+// write to the returned Writer sends a log message with the given
+// priority and prefix.
+func New(priority Priority, tag string) (w *Writer, err error) {
+	return Dial("", "", priority, tag)
+}
+
+// Dial establishes a connection to a log daemon by connecting to
+// address raddr on the specified network. Each write to the returned
+// Writer sends a log message with the given facility, severity and
+// tag.
+// If network is empty, Dial will connect to the local syslog server.
+func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) {
+	return DialWithTLSConfig(network, raddr, priority, tag, nil)
+}
+
+// ErrNilDialFunc is returned from DialWithCustomDialer when a nil DialFunc is passed,
+// avoiding a nil pointer dereference panic.
+var ErrNilDialFunc = errors.New("srslog: nil DialFunc passed to DialWithCustomDialer")
+
+// DialWithCustomDialer establishes a connection by calling customDial.
+// Each write to the returned Writer sends a log message with the given facility, severity and tag.
+// Network must be "custom" in order for this package to use customDial.
+// While network and raddr will be passed to customDial, it is allowed for customDial to ignore them.
+// If customDial is nil, this function returns ErrNilDialFunc.
+func DialWithCustomDialer(network, raddr string, priority Priority, tag string, customDial DialFunc) (*Writer, error) {
+	if customDial == nil {
+		return nil, ErrNilDialFunc
+	}
+	return dialAllParameters(network, raddr, priority, tag, nil, customDial)
+}
+
+// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses certPath to load TLS certificates and configure
+// the secure connection.
+func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) {
+	serverCert, err := ioutil.ReadFile(certPath)
+	if err != nil {
+		return nil, err
+	}
+
+	return DialWithTLSCert(network, raddr, priority, tag, serverCert)
+}
+
+// DialWithTLSCert establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses serverCert to load a TLS certificate
+// and configure the secure connection.
+func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) {
+	pool := x509.NewCertPool()
+	pool.AppendCertsFromPEM(serverCert)
+	config := tls.Config{
+		RootCAs: pool,
+	}
+
+	return DialWithTLSConfig(network, raddr, priority, tag, &config)
+}
+
+// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses tlsConfig to configure the secure connection.
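+// Dial is implemented as a call to this function with a nil tlsConfig.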
+func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { + return dialAllParameters(network, raddr, priority, tag, tlsConfig, nil) +} + +// implementation of the various functions above +func dialAllParameters(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config, customDial DialFunc) (*Writer, error) { + if err := validatePriority(priority); err != nil { + return nil, err + } + + if tag == "" { + tag = os.Args[0] + } + hostname, _ := os.Hostname() + + w := &Writer{ + priority: priority, + tag: tag, + hostname: hostname, + network: network, + raddr: raddr, + tlsConfig: tlsConfig, + customDial: customDial, + } + + _, err := w.connect() + if err != nil { + return nil, err + } + return w, err +} + +// NewLogger creates a log.Logger whose output is written to +// the system log service with the specified priority. The logFlag +// argument is the flag set passed through to log.New to create +// the Logger. +func NewLogger(p Priority, logFlag int) (*log.Logger, error) { + s, err := New(p, "") + if err != nil { + return nil, err + } + return log.New(s, "", logFlag), nil +} diff --git a/vendor/github.com/wiggin77/srslog/srslog_unix.go b/vendor/github.com/wiggin77/srslog/srslog_unix.go new file mode 100644 index 0000000..a04d939 --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/srslog_unix.go @@ -0,0 +1,54 @@ +package srslog + +import ( + "errors" + "io" + "net" +) + +// unixSyslog opens a connection to the syslog daemon running on the +// local machine using a Unix domain socket. This function exists because of +// Solaris support as implemented by gccgo. On Solaris you can not +// simply open a TCP connection to the syslog daemon. The gccgo +// sources have a syslog_solaris.go file that implements unixSyslog to +// return a type that satisfies the serverConn interface and simply calls the C +// library syslog function. +func unixSyslog() (conn serverConn, err error) { + logTypes := []string{"unixgram", "unix"} + logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} + for _, network := range logTypes { + for _, path := range logPaths { + conn, err := net.Dial(network, path) + if err != nil { + continue + } else { + return &localConn{conn: conn}, nil + } + } + } + return nil, errors.New("Unix syslog delivery error") +} + +// localConn adheres to the serverConn interface, allowing us to send syslog +// messages to the local syslog daemon over a Unix domain socket. +type localConn struct { + conn io.WriteCloser +} + +// writeString formats syslog messages using time.Stamp instead of time.RFC3339, +// and omits the hostname (because it is expected to be used locally). +func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = UnixFormatter + } + _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) + return err +} + +// close the (local) network connection +func (n *localConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/wiggin77/srslog/writer.go b/vendor/github.com/wiggin77/srslog/writer.go new file mode 100644 index 0000000..86bccba --- /dev/null +++ b/vendor/github.com/wiggin77/srslog/writer.go @@ -0,0 +1,201 @@ +package srslog + +import ( + "crypto/tls" + "strings" + "sync" +) + +// A Writer is a connection to a syslog server. 
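+// If a write fails, the Writer reconnects and retries the write once (see writeAndRetryWithPriority in this file).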
+type Writer struct { + priority Priority + tag string + hostname string + network string + raddr string + tlsConfig *tls.Config + framer Framer + formatter Formatter + + //non-nil if custom dialer set, used in getDialer + customDial DialFunc + + mu sync.RWMutex // guards conn + conn serverConn +} + +// getConn provides access to the internal conn, protected by a mutex. The +// conn is threadsafe, so it can be used while unlocked, but we want to avoid +// race conditions on grabbing a reference to it. +func (w *Writer) getConn() serverConn { + w.mu.RLock() + conn := w.conn + w.mu.RUnlock() + return conn +} + +// setConn updates the internal conn, protected by a mutex. +func (w *Writer) setConn(c serverConn) { + w.mu.Lock() + w.conn = c + w.mu.Unlock() +} + +// connect makes a connection to the syslog server. +func (w *Writer) connect() (serverConn, error) { + conn := w.getConn() + if conn != nil { + // ignore err from close, it makes sense to continue anyway + conn.close() + w.setConn(nil) + } + + var hostname string + var err error + dialer := w.getDialer() + conn, hostname, err = dialer.Call() + if err == nil { + w.setConn(conn) + w.hostname = hostname + + return conn, nil + } else { + return nil, err + } +} + +// SetFormatter changes the formatter function for subsequent messages. +func (w *Writer) SetFormatter(f Formatter) { + w.formatter = f +} + +// SetFramer changes the framer function for subsequent messages. +func (w *Writer) SetFramer(f Framer) { + w.framer = f +} + +// SetHostname changes the hostname for syslog messages if needed. +func (w *Writer) SetHostname(hostname string) { + w.hostname = hostname +} + +// Write sends a log message to the syslog daemon using the default priority +// passed into `srslog.New` or the `srslog.Dial*` functions. +func (w *Writer) Write(b []byte) (int, error) { + return w.writeAndRetry(w.priority, string(b)) +} + +// WriteWithPriority sends a log message with a custom priority. +func (w *Writer) WriteWithPriority(p Priority, b []byte) (int, error) { + return w.writeAndRetryWithPriority(p, string(b)) +} + +// Close closes a connection to the syslog daemon. +func (w *Writer) Close() error { + conn := w.getConn() + if conn != nil { + err := conn.close() + w.setConn(nil) + return err + } + return nil +} + +// Emerg logs a message with severity LOG_EMERG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Emerg(m string) (err error) { + _, err = w.writeAndRetry(LOG_EMERG, m) + return err +} + +// Alert logs a message with severity LOG_ALERT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Alert(m string) (err error) { + _, err = w.writeAndRetry(LOG_ALERT, m) + return err +} + +// Crit logs a message with severity LOG_CRIT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Crit(m string) (err error) { + _, err = w.writeAndRetry(LOG_CRIT, m) + return err +} + +// Err logs a message with severity LOG_ERR; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Err(m string) (err error) { + _, err = w.writeAndRetry(LOG_ERR, m) + return err +} + +// Warning logs a message with severity LOG_WARNING; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. 
+func (w *Writer) Warning(m string) (err error) { + _, err = w.writeAndRetry(LOG_WARNING, m) + return err +} + +// Notice logs a message with severity LOG_NOTICE; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Notice(m string) (err error) { + _, err = w.writeAndRetry(LOG_NOTICE, m) + return err +} + +// Info logs a message with severity LOG_INFO; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Info(m string) (err error) { + _, err = w.writeAndRetry(LOG_INFO, m) + return err +} + +// Debug logs a message with severity LOG_DEBUG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Debug(m string) (err error) { + _, err = w.writeAndRetry(LOG_DEBUG, m) + return err +} + +// writeAndRetry takes a severity and the string to write. Any facility passed to +// it as part of the severity Priority will be ignored. +func (w *Writer) writeAndRetry(severity Priority, s string) (int, error) { + pr := (w.priority & facilityMask) | (severity & severityMask) + + return w.writeAndRetryWithPriority(pr, s) +} + +// writeAndRetryWithPriority differs from writeAndRetry in that it allows setting +// of both the facility and the severity. +func (w *Writer) writeAndRetryWithPriority(p Priority, s string) (int, error) { + conn := w.getConn() + if conn != nil { + if n, err := w.write(conn, p, s); err == nil { + return n, err + } + } + + var err error + if conn, err = w.connect(); err != nil { + return 0, err + } + return w.write(conn, p, s) +} + +// write generates and writes a syslog formatted string. It formats the +// message based on the current Formatter and Framer. +func (w *Writer) write(conn serverConn, p Priority, msg string) (int, error) { + // ensure it ends in a \n + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + err := conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) + if err != nil { + return 0, err + } + // Note: return the length of the input, not the number of + // bytes printed by Fprintf, because this must behave like + // an io.Writer. + return len(msg), nil +} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml index 6d4d1be..571116c 100644 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -13,3 +13,7 @@ coverage: if_not_found: success # if parent is not found report status as success, error, or failure if_ci_failed: error # if ci fails report status as success, error, or failure +# Also update COVER_IGNORE_PKGS in the Makefile. 
+ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore index 0a4504f..c3fa253 100644 --- a/vendor/go.uber.org/atomic/.gitignore +++ b/vendor/go.uber.org/atomic/.gitignore @@ -1,6 +1,7 @@ +/bin .DS_Store /vendor -/cover +cover.html cover.out lint.log diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml index 0f3769e..13d0a4f 100644 --- a/vendor/go.uber.org/atomic/.travis.yml +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -2,26 +2,26 @@ sudo: false language: go go_import_path: go.uber.org/atomic -go: - - 1.11.x - - 1.12.x +env: + global: + - GO111MODULE=on matrix: include: - - go: 1.12.x - env: NO_TEST=yes LINT=yes + - go: oldstable + - go: stable + env: LINT=1 cache: directories: - vendor -install: - - make install_ci +before_install: + - go version script: - - test -n "$NO_TEST" || make test_ci - - test -n "$NO_TEST" || scripts/test-ubergo.sh - - test -z "$LINT" || make install_lint lint + - test -z "$LINT" || make lint + - make cover after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 0000000..24c0274 --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,76 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. + +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +## [1.0.0] - 2016-07-18 + +- Initial release. 
+ +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 1ef2630..1b1376d 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -1,51 +1,78 @@ -# Many Go tools take file globs or directories as arguments instead of packages. -PACKAGE_FILES ?= *.go +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin -# For pre go1.6 -export GO15VENDOREXPERIMENT=1 +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper .PHONY: build build: - go build -i ./... + go build ./... +.PHONY: test +test: + go test -race ./... -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) +$(GOLINT): + cd tools && go install golang.org/x/lint/golint -.PHONY: test -test: - go test -cover -race ./... +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint -.PHONY: install_lint -install_lint: - go get golang.org/x/lint/golint +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... .PHONY: lint -lint: - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @go vet ./... 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @golint $$(go list ./...) 2>&1 | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @[ ! -s lint.log ] - - -.PHONY: test_ci -test_ci: install_ci build - ./scripts/cover.sh $(shell go list $(PACKAGES)) +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... 
| \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index 62eb8e5..ade0c20 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -3,9 +3,34 @@ Simple wrappers for primitive types to enforce atomic access. ## Installation -`go get -u go.uber.org/atomic` + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. If you are using Go modules, this package will fail to +compile with the legacy import path path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. + +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` ## Usage + The standard library's `sync/atomic` is powerful, but it's easy to forget which variables must be accessed atomically. `go.uber.org/atomic` preserves all the functionality of the standard library, but wraps the primitive types to @@ -21,9 +46,11 @@ atom.CAS(40, 11) See the [documentation][doc] for a complete API specification. ## Development Status + Stable. -___ +--- + Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go deleted file mode 100644 index 1db6849..0000000 --- a/vendor/go.uber.org/atomic/atomic.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic - -import ( - "math" - "sync/atomic" - "time" -) - -// Int32 is an atomic wrapper around an int32. -type Int32 struct{ v int32 } - -// NewInt32 creates an Int32. -func NewInt32(i int32) *Int32 { - return &Int32{i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// Int64 is an atomic wrapper around an int64. -type Int64 struct{ v int64 } - -// NewInt64 creates an Int64. -func NewInt64(i int64) *Int64 { - return &Int64{i} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// Uint32 is an atomic wrapper around an uint32. -type Uint32 struct{ v uint32 } - -// NewUint32 creates a Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. 
-func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// Uint64 is an atomic wrapper around a uint64. -type Uint64 struct{ v uint64 } - -// NewUint64 creates a Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// Bool is an atomic Boolean. -type Bool struct{ v uint32 } - -// NewBool creates a Bool. -func NewBool(initial bool) *Bool { - return &Bool{boolToInt(initial)} -} - -// Load atomically loads the Boolean. -func (b *Bool) Load() bool { - return truthy(atomic.LoadUint32(&b.v)) -} - -// CAS is an atomic compare-and-swap. -func (b *Bool) CAS(old, new bool) bool { - return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) -} - -// Store atomically stores the passed value. -func (b *Bool) Store(new bool) { - atomic.StoreUint32(&b.v, boolToInt(new)) -} - -// Swap sets the given value and returns the previous value. -func (b *Bool) Swap(new bool) bool { - return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - return truthy(atomic.AddUint32(&b.v, 1) - 1) -} - -func truthy(n uint32) bool { - return n&1 == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Float64 is an atomic wrapper around float64. -type Float64 struct { - v uint64 -} - -// NewFloat64 creates a Float64. -func NewFloat64(f float64) *Float64 { - return &Float64{math.Float64bits(f)} -} - -// Load atomically loads the wrapped value. -func (f *Float64) Load() float64 { - return math.Float64frombits(atomic.LoadUint64(&f.v)) -} - -// Store atomically stores the passed value. 
-func (f *Float64) Store(s float64) { - atomic.StoreUint64(&f.v, math.Float64bits(s)) -} - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// CAS is an atomic compare-and-swap. -func (f *Float64) CAS(old, new float64) bool { - return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) -} - -// Duration is an atomic wrapper around time.Duration -// https://godoc.org/time#Duration -type Duration struct { - v Int64 -} - -// NewDuration creates a Duration. -func NewDuration(d time.Duration) *Duration { - return &Duration{v: *NewInt64(int64(d))} -} - -// Load atomically loads the wrapped value. -func (d *Duration) Load() time.Duration { - return time.Duration(d.v.Load()) -} - -// Store atomically stores the passed value. -func (d *Duration) Store(n time.Duration) { - d.v.Store(int64(n)) -} - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// Swap atomically swaps the wrapped time.Duration and returns the old value. -func (d *Duration) Swap(n time.Duration) time.Duration { - return time.Duration(d.v.Swap(int64(n))) -} - -// CAS is an atomic compare-and-swap. -func (d *Duration) CAS(old, new time.Duration) bool { - return d.v.CAS(int64(old), int64(new)) -} - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 0000000..9cf1914 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. 
+type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(v bool) *Bool { + x := &Bool{} + if v != _zeroBool { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(v bool) { + x.v.Store(boolToInt(v)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(o, n bool) bool { + return x.v.CAS(boolToInt(o), boolToInt(n)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(o bool) bool { + return truthy(x.v.Swap(boolToInt(o))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 0000000..c7bf7a8 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 0000000..ae7390e --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 0000000..027cfcb --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(v time.Duration) *Duration { + x := &Duration{} + if v != _zeroDuration { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(v time.Duration) { + x.v.Store(int64(v)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CAS(o, n time.Duration) bool { + return x.v.CAS(int64(o), int64(n)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. 
+func (x *Duration) Swap(o time.Duration) time.Duration { + return time.Duration(x.v.Swap(int64(o))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 0000000..6273b66 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index 0489d19..a6166fb 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,4 +1,6 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,36 +22,30 @@ package atomic -// Error is an atomic type-safe wrapper around Value for errors -type Error struct{ v Value } - -// errorHolder is non-nil holder for error object. -// atomic.Value panics on saving nil object, so err object needs to be -// wrapped with valid object first. -type errorHolder struct{ err error } +// Error is an atomic type-safe wrapper for error values. 
+type Error struct { + _ nocmp // disallow non-atomic comparison -// NewError creates new atomic error object -func NewError(err error) *Error { - e := &Error{} - if err != nil { - e.Store(err) - } - return e + v Value } -// Load atomically loads the wrapped error -func (e *Error) Load() error { - v := e.v.Load() - if v == nil { - return nil +var _zeroError error + +// NewError creates a new Error. +func NewError(v error) *Error { + x := &Error{} + if v != _zeroError { + x.Store(v) } + return x +} - eh := v.(errorHolder) - return eh.err +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) } -// Store atomically stores error. -// NOTE: a holder object is allocated on each Store call. -func (e *Error) Store(err error) { - e.v.Store(errorHolder{err: err}) +// Store atomically stores the passed error. +func (x *Error) Store(v error) { + x.v.Store(packError(v)) } diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 0000000..ffe0be2 --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 0000000..0719060 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,76 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(v float64) *Float64 { + x := &Float64{} + if v != _zeroFloat64 { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(v float64) { + x.v.Store(math.Float64bits(v)) +} + +// CAS is an atomic compare-and-swap for float64 values. +func (x *Float64) CAS(o, n float64) bool { + return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 0000000..927b1ad --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "strconv" + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 0000000..50d6b24 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,26 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock deleted file mode 100644 index 3c72c59..0000000 --- a/vendor/go.uber.org/atomic/glide.lock +++ /dev/null @@ -1,17 +0,0 @@ -hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 -updated: 2016-10-27T00:10:51.16960137-07:00 -imports: [] -testImports: -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml deleted file mode 100644 index 4cf608e..0000000 --- a/vendor/go.uber.org/atomic/glide.yaml +++ /dev/null @@ -1,6 +0,0 @@ -package: go.uber.org/atomic -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod new file mode 100644 index 0000000..daa7599 --- /dev/null +++ b/vendor/go.uber.org/atomic/go.mod @@ -0,0 +1,8 @@ +module go.uber.org/atomic + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 +) + +go 1.13 diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum new file mode 100644 index 0000000..4f76e62 --- /dev/null +++ b/vendor/go.uber.org/atomic/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 0000000..18ae564 --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(i int32) *Int32 { + return &Int32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 0000000..2bcbbfa --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(i int64) *Int64 { + return &Int64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 0000000..a8201cb --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index ede8136..225b7a2 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,4 +1,6 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,30 +22,33 @@ package atomic -// String is an atomic type-safe wrapper around Value for strings. -type String struct{ v Value } +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string -// NewString creates a String. -func NewString(str string) *String { - s := &String{} - if str != "" { - s.Store(str) +// NewString creates a new String. +func NewString(v string) *String { + x := &String{} + if v != _zeroString { + x.Store(v) } - return s + return x } // Load atomically loads the wrapped string. -func (s *String) Load() string { - v := s.v.Load() - if v == nil { - return "" +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) } - return v.(string) + return _zeroString } // Store atomically stores the passed string. -// Note: Converting the string to an interface{} to store in the Value -// requires an allocation. -func (s *String) Store(str string) { - s.v.Store(str) +func (x *String) Store(v string) { + x.v.Store(v) } diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 0000000..3a95582 --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 0000000..a973aba --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{v: i} +} + +// Load atomically loads the wrapped value. 
+func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 0000000..3b6c71f --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. 
+func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 0000000..671f3a3 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore index 61ead86..b9a05e3 100644 --- a/vendor/go.uber.org/multierr/.gitignore +++ b/vendor/go.uber.org/multierr/.gitignore @@ -1 +1,4 @@ /vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml index 5ffa8fe..8636ab4 100644 --- a/vendor/go.uber.org/multierr/.travis.yml +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -4,30 +4,20 @@ go_import_path: go.uber.org/multierr env: global: - - GO15VENDOREXPERIMENT=1 + - GO111MODULE=on go: - - 1.7 - - 1.8 - - tip - -cache: - directories: - - vendor + - oldstable + - stable before_install: - go version -install: -- | - set -e - make install_ci - script: - | set -e make lint - make test_ci + make cover after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index 898445d..6f1db9e 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,38 @@ Releases ======== +v1.6.0 (2020-09-14) +=================== + +- Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + v1.1.0 (2017-06-30) =================== diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile index a7437d0..3160044 100644 --- a/vendor/go.uber.org/multierr/Makefile +++ b/vendor/go.uber.org/multierr/Makefile @@ -1,23 +1,17 @@ -export GO15VENDOREXPERIMENT=1 - -PACKAGES := $(shell glide nv) +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin GO_FILES := $(shell \ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ -o -name '*.go' -print | cut -b3-) -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - .PHONY: build build: - go build -i $(PACKAGES) + go build ./... .PHONY: test test: - go test -cover -race $(PACKAGES) + go test -race ./... .PHONY: gofmt gofmt: @@ -25,50 +19,24 @@ gofmt: @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) -.PHONY: govet -govet: - $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) - @go vet $(PACKAGES) 2>&1 \ - | grep -v '^exit status' > $(VET_LOG) || true - @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) - .PHONY: golint golint: - @go get github.com/golang/lint/golint - $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... 
.PHONY: staticcheck staticcheck: - @go get honnef.co/go/tools/cmd/staticcheck - $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) - @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true - @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... .PHONY: lint -lint: gofmt govet golint staticcheck +lint: gofmt golint staticcheck .PHONY: cover cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) + go test -coverprofile=cover.out -coverpkg=./... -v ./... go tool cover -html=cover.out -o cover.html update-license: - @go get go.uber.org/tools/update-license - @update-license \ - $(shell go list -json $(PACKAGES) | \ - jq -r '.Dir + "/" + (.GoFiles | .[])') - -############################################################################## - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -.PHONY: test_ci -test_ci: install_ci - ./scripts/cover.sh $(shell go list $(PACKAGES)) + @cd tools && go install go.uber.org/tools/update-license + @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md index 065088f..751bd65 100644 --- a/vendor/go.uber.org/multierr/README.md +++ b/vendor/go.uber.org/multierr/README.md @@ -17,7 +17,7 @@ Released under the [MIT License]. [MIT License]: LICENSE.txt [doc-img]: https://godoc.org/go.uber.org/multierr?status.svg [doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master +[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master [cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.org/uber-go/multierr +[ci]: https://travis-ci.com/uber-go/multierr [cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index de6ce47..5c9b67d 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -33,7 +33,7 @@ // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Combine(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // This makes it possible to record resource cleanup failures from deferred // blocks with the help of named return values. @@ -54,7 +54,7 @@ // // errors := multierr.Errors(err) // if len(errors) > 0 { -// fmt.Println("The following errors occurred:") +// fmt.Println("The following errors occurred:", errors) // } // // Advanced Usage @@ -99,8 +99,6 @@ var ( // Separator for single-line error messages. _singlelineSeparator = []byte("; ") - _newline = []byte("\n") - // Prefix for multi-line messages _multilinePrefix = []byte("the following errors occurred:") @@ -132,7 +130,7 @@ type errorGroup interface { } // Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, the returned slice is empty. +// error is composed of. If the error is nil, a nil slice is returned. 
// // err := multierr.Append(r.Close(), w.Close()) // errors := multierr.Errors(err) @@ -399,3 +397,53 @@ func Append(left error, right error) error { errors := [2]error{left, right} return fromSlice(errors[0:]) } + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a verison that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. + panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock deleted file mode 100644 index f9ea94c..0000000 --- a/vendor/go.uber.org/multierr/glide.lock +++ /dev/null @@ -1,19 +0,0 @@ -hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 -updated: 2017-04-10T13:34:45.671678062-07:00 -imports: -- name: go.uber.org/atomic - version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb -testImports: -- name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod new file mode 100644 index 0000000..ff8bdf9 --- /dev/null +++ b/vendor/go.uber.org/multierr/go.mod @@ -0,0 +1,8 @@ +module go.uber.org/multierr + +go 1.12 + +require ( + github.com/stretchr/testify v1.3.0 + go.uber.org/atomic v1.7.0 +) diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum new file mode 100644 index 0000000..ecfc286 --- /dev/null +++ b/vendor/go.uber.org/multierr/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go new file mode 100644 index 0000000..264b0ea --- /dev/null +++ b/vendor/go.uber.org/multierr/go113.go @@ -0,0 +1,52 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build go1.13 + +package multierr + +import "errors" + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore index 08fbde6..da9d9d0 100644 --- a/vendor/go.uber.org/zap/.gitignore +++ b/vendor/go.uber.org/zap/.gitignore @@ -26,3 +26,7 @@ _testmain.go *.pprof *.out *.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index c6440db..3154a1e 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -100,9 +100,10 @@ pinned in zap's [glide.lock][] file. 
[↩](#anchor-versions) [doc-img]: https://godoc.org/go.uber.org/zap?status.svg [doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap +[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.com/uber-go/zap [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks [glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock + diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml index ada5ebd..cfdc69f 100644 --- a/vendor/go.uber.org/zap/.travis.yml +++ b/vendor/go.uber.org/zap/.travis.yml @@ -1,21 +1,23 @@ language: go sudo: false -go: - - 1.11.x - - 1.12.x + go_import_path: go.uber.org/zap env: global: - TEST_TIMEOUT_SCALE=10 -cache: - directories: - - vendor -install: - - make dependencies + - GO111MODULE=on + +matrix: + include: + - go: 1.13.x + - go: 1.14.x + env: LINT=1 + script: - - make lint + - test -z "$LINT" || make lint - make test - make bench + after_success: - make cover - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 28d1067..fa817e6 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,85 @@ # Changelog +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. +
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + ## 1.10.0 (29 Apr 2019) Bugfixes: @@ -325,3 +405,28 @@ upgrade to the upcoming stable release. [#610]: https://github.com/uber-go/zap/pull/610 [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 +[#751]: https://github.com/uber-go/zap/pull/751 +[#758]: https://github.com/uber-go/zap/pull/758 +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md index 4256d35..5ec7288 100644 --- a/vendor/go.uber.org/zap/FAQ.md +++ b/vendor/go.uber.org/zap/FAQ.md @@ -149,6 +149,7 @@ We're aware of the following extensions, but haven't used them ourselves: | `github.com/tchap/zapext` | Sentry, syslog | | `github.com/fgrosse/zaptest` | Ginkgo | | `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | [go-proverbs]: https://go-proverbs.github.io/ [import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index 073e9aa..dfaf640 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -1,74 +1,61 @@ -export GO15VENDOREXPERIMENT=1 +export GOBIN ?= $(shell pwd)/bin +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck BENCH_FLAGS ?= -cpuprofile=cpu.pprof 
-memprofile=mem.pprof -benchmem -PKGS ?= $(shell glide novendor) -# Many Go tools take file globs or directories as arguments instead of packages. -PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 12 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif +# Directories containing independent Go modules. +# +# We track coverage only for the main module. +MODULE_DIRS = . ./benchmarks +# Many Go tools take file globs or directories as arguments instead of packages. +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) .PHONY: all all: lint test -.PHONY: dependencies -dependencies: - @echo "Installing Glide and locked dependencies..." - glide --version || go get -u -f github.com/Masterminds/glide - glide install - @echo "Installing test dependencies..." - go install ./vendor/github.com/axw/gocov/gocov - go install ./vendor/github.com/mattn/goveralls -ifdef SHOULD_LINT - @echo "Installing golint..." - go install ./vendor/github.com/golang/lint/golint -else - @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) -endif - -# Disable printf-like invocation checking due to testify.assert.Error() -VET_RULES := -printf=false - .PHONY: lint -lint: -ifdef SHOULD_LINT +lint: $(GOLINT) $(STATICCHECK) @rm -rf lint.log @echo "Checking formatting..." - @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log - @echo "Installing test dependencies for vet..." - @go test -i $(PKGS) + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log @echo "Checking vet..." - @go vet $(VET_RULES) $(PKGS) 2>&1 | tee -a lint.log + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking staticcheck..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @git grep -i fixme | grep -v -e Makefile | tee -a lint.log @echo "Checking for license headers..." - @./check_license.sh | tee -a lint.log + @./checklicense.sh | tee -a lint.log @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif + +$(GOLINT): + go install golang.org/x/lint/golint + +$(STATICCHECK): + go install honnef.co/go/tools/cmd/staticcheck .PHONY: test test: - go test -race $(PKGS) + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true .PHONY: cover cover: - ./scripts/cover.sh $(PKGS) + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html .PHONY: bench BENCH ?= . bench: - @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... 
| xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true .PHONY: updatereadme updatereadme: diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index f4fd1cb..bcea28a 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -64,43 +64,40 @@ id="anchor-versions">[1](#footnote-versions) Log a message and 10 fields: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 3131 ns/op | 5 allocs/op | -| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | -| zerolog | 16154 ns/op | 90 allocs/op | -| lion | 16341 ns/op | 111 allocs/op | -| go-kit | 17049 ns/op | 126 allocs/op | -| logrus | 23662 ns/op | 142 allocs/op | -| log15 | 36351 ns/op | 149 allocs/op | -| apex/log | 42530 ns/op | 126 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 862 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op +| zerolog | 4021 ns/op | +366% | 76 allocs/op +| go-kit | 4542 ns/op | +427% | 105 allocs/op +| apex/log | 26785 ns/op | +3007% | 115 allocs/op +| logrus | 29501 ns/op | +3322% | 125 allocs/op +| log15 | 29906 ns/op | +3369% | 122 allocs/op Log a message with a logger that already has 10 fields of context: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 380 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | -| zerolog | 321 ns/op | 0 allocs/op | -| lion | 7092 ns/op | 39 allocs/op | -| go-kit | 20226 ns/op | 115 allocs/op | -| logrus | 22312 ns/op | 130 allocs/op | -| log15 | 28788 ns/op | 79 allocs/op | -| apex/log | 42063 ns/op | 115 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 126 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op +| zerolog | 88 ns/op | -30% | 0 allocs/op +| go-kit | 5087 ns/op | +3937% | 103 allocs/op +| log15 | 18548 ns/op | +14621% | 73 allocs/op +| apex/log | 26012 ns/op | +20544% | 104 allocs/op +| logrus | 27236 ns/op | +21516% | 113 allocs/op Log a static string, without any context or `printf`-style templating: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 361 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | -| zerolog | 323 ns/op | 0 allocs/op | -| standard library | 575 ns/op | 2 allocs/op | -| go-kit | 922 ns/op | 13 allocs/op | -| lion | 1413 ns/op | 10 allocs/op | -| logrus | 2291 ns/op | 27 allocs/op | -| apex/log | 3690 ns/op | 11 allocs/op | -| log15 | 5954 ns/op | 26 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 118 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op +| zerolog | 93 ns/op | -21% | 0 allocs/op +| go-kit | 280 ns/op | +137% | 11 allocs/op +| standard library | 499 ns/op | +323% | 2 allocs/op +| apex/log | 1990 ns/op | +1586% | 10 allocs/op +| logrus | 3129 ns/op | +2552% | 24 allocs/op +| log15 | 3887 ns/op | +3194% | 23 allocs/op ## Development Status: Stable @@ -124,13 +121,14 @@ Released under the [MIT License](LICENSE.txt). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) +pinned in the [benchmarks/go.mod][] file. 
[↩](#anchor-versions) [doc-img]: https://godoc.org/go.uber.org/zap?status.svg [doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap +[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.com/uber-go/zap [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 7592e8c..3f4b86e 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -23,7 +23,10 @@ // package's zero-allocation formatters. package buffer // import "go.uber.org/zap/buffer" -import "strconv" +import ( + "strconv" + "time" +) const _size = 1024 // by default, create 1 KiB buffers @@ -49,6 +52,11 @@ func (b *Buffer) AppendInt(i int64) { b.bs = strconv.AppendInt(b.bs, i, 10) } +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + // AppendUint appends an unsigned integer to the underlying buffer (assuming // base 10). func (b *Buffer) AppendUint(i uint64) { diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/checklicense.sh old mode 100755 new mode 100644 similarity index 100% rename from vendor/go.uber.org/zap/check_license.sh rename to vendor/go.uber.org/zap/checklicense.sh diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index 6fe17d9..55637fb 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -21,6 +21,7 @@ package zap import ( + "fmt" "sort" "time" @@ -31,10 +32,14 @@ import ( // global CPU and I/O load that logging puts on your process while attempting // to preserve a representative subset of your logs. // -// Values configured here are per-second. See zapcore.NewSampler for details. +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` } // Config offers a declarative way to construct a logger. 
It doesn't do @@ -96,6 +101,7 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig { LevelKey: "level", NameKey: "logger", CallerKey: "caller", + FunctionKey: zapcore.OmitKey, MessageKey: "msg", StacktraceKey: "stacktrace", LineEnding: zapcore.DefaultLineEnding, @@ -135,6 +141,7 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { LevelKey: "L", NameKey: "N", CallerKey: "C", + FunctionKey: zapcore.OmitKey, MessageKey: "M", StacktraceKey: "S", LineEnding: zapcore.DefaultLineEnding, @@ -174,6 +181,10 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) { return nil, err } + if cfg.Level == (AtomicLevel{}) { + return nil, fmt.Errorf("missing Level") + } + log := New( zapcore.NewCore(enc, sink, cfg.Level), cfg.buildOptions(errSink)..., @@ -203,9 +214,19 @@ func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { opts = append(opts, AddStacktrace(stackLevel)) } - if cfg.Sampling != nil { + if scfg := cfg.Sampling; scfg != nil { opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) })) } diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go index 2e9d3c3..08ed833 100644 --- a/vendor/go.uber.org/zap/encoder.go +++ b/vendor/go.uber.org/zap/encoder.go @@ -62,6 +62,10 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco } func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") + } + _encoderMutex.RLock() defer _encoderMutex.RUnlock() if name == "" { diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 5130e13..3c0d7d9 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -32,12 +32,23 @@ import ( // improves the navigability of this package's API documentation. type Field = zapcore.Field +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + // Skip constructs a no-op field, which is often useful when handling invalid // inputs in other Field constructors. func Skip() Field { return Field{Type: zapcore.SkipType} } +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + // Binary constructs a field that carries an opaque binary blob. // // Binary data is serialized in an encoding-appropriate format. For example, @@ -56,6 +67,15 @@ func Bool(key string, val bool) Field { return Field{Key: key, Type: zapcore.BoolType, Integer: ival} } +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + // ByteString constructs a field that carries UTF-8 encoded text as a []byte. // To log opaque binary blobs (which aren't necessarily valid UTF-8), use // Binary. @@ -70,6 +90,15 @@ func Complex128(key string, val complex128) Field { return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} } +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + // Complex64 constructs a field that carries a complex number. Unlike most // numeric fields, this costs an allocation (to convert the complex64 to // interface{}). @@ -77,6 +106,15 @@ func Complex64(key string, val complex64) Field { return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} } +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + // Float64 constructs a field that carries a float64. The way the // floating-point value is represented is encoder-dependent, so marshaling is // necessarily lazy. @@ -84,6 +122,15 @@ func Float64(key string, val float64) Field { return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} } +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + // Float32 constructs a field that carries a float32. The way the // floating-point value is represented is encoder-dependent, so marshaling is // necessarily lazy. @@ -91,66 +138,183 @@ func Float32(key string, val float32) Field { return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} } +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + // Int constructs a field with the given key and value. func Int(key string, val int) Field { return Int64(key, int64(val)) } +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + // Int64 constructs a field with the given key and value. func Int64(key string, val int64) Field { return Field{Key: key, Type: zapcore.Int64Type, Integer: val} } +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + // Int32 constructs a field with the given key and value. func Int32(key string, val int32) Field { return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} } +// Int32p constructs a field that carries a *int32. 
The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + // Int16 constructs a field with the given key and value. func Int16(key string, val int16) Field { return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} } +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + // Int8 constructs a field with the given key and value. func Int8(key string, val int8) Field { return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} } +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + // String constructs a field with the given key and value. func String(key string, val string) Field { return Field{Key: key, Type: zapcore.StringType, String: val} } +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + // Uint constructs a field with the given key and value. func Uint(key string, val uint) Field { return Uint64(key, uint64(val)) } +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + // Uint64 constructs a field with the given key and value. func Uint64(key string, val uint64) Field { return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} } +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + // Uint32 constructs a field with the given key and value. func Uint32(key string, val uint32) Field { return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} } +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + // Uint16 constructs a field with the given key and value. func Uint16(key string, val uint16) Field { return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} } +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + // Uint8 constructs a field with the given key and value. func Uint8(key string, val uint8) Field { return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} } +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
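Note: the field.go hunks above introduce a family of pointer-valued constructors (Boolp, Intp, Stringp, and so on) that encode nil pointers explicitly instead of panicking or dereferencing. A minimal sketch of the behavior, using illustrative field names:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	var nickname *string // nil: encoded explicitly as null
	age := 42
	logger.Info("user updated",
		zap.Stringp("nickname", nickname), // -> "nickname": null
		zap.Intp("age", &age),             // -> "age": 42
	)
}

The Any switch later in this diff gains matching cases for these pointer types, so untyped call sites pick up the same nil-safe encoding.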
+func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + // Uintptr constructs a field with the given key and value. func Uintptr(key string, val uintptr) Field { return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} } +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + // Reflect constructs a field with the given key and an arbitrary object. It uses // an encoding-appropriate, reflection-based function to lazily serialize nearly // any object into the logging context, but it's relatively slow and @@ -180,19 +344,37 @@ func Stringer(key string, val fmt.Stringer) Field { // Time constructs a Field with the given key and value. The encoder // controls how the time is serialized. func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} } +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + // Stack constructs a field that stores a stacktrace of the current goroutine // under provided key. Keep in mind that taking a stacktrace is eager and // expensive (relatively speaking); this function both makes an allocation and // takes about two microseconds. func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { // Returning the stacktrace as a string costs an allocation, but saves us // from expanding the zapcore.Field union struct to include a byte slice. Since // taking a stacktrace is already so expensive (~10us), the extra allocation // is okay. - return String(key, takeStacktrace()) + return String(key, takeStacktrace(skip+1)) // skip StackSkip } // Duration constructs a field with the given key and value. The encoder @@ -201,6 +383,15 @@ func Duration(key string, val time.Duration) Field { return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} } +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + // Object constructs a field with the given key and ObjectMarshaler. It // provides a flexible, but still type-safe and efficient, way to add map- or // struct-like user-defined types to the logging context. 
The struct's @@ -224,78 +415,116 @@ func Any(key string, value interface{}) Field { return Array(key, val) case bool: return Bool(key, val) + case *bool: + return Boolp(key, val) case []bool: return Bools(key, val) case complex128: return Complex128(key, val) + case *complex128: + return Complex128p(key, val) case []complex128: return Complex128s(key, val) case complex64: return Complex64(key, val) + case *complex64: + return Complex64p(key, val) case []complex64: return Complex64s(key, val) case float64: return Float64(key, val) + case *float64: + return Float64p(key, val) case []float64: return Float64s(key, val) case float32: return Float32(key, val) + case *float32: + return Float32p(key, val) case []float32: return Float32s(key, val) case int: return Int(key, val) + case *int: + return Intp(key, val) case []int: return Ints(key, val) case int64: return Int64(key, val) + case *int64: + return Int64p(key, val) case []int64: return Int64s(key, val) case int32: return Int32(key, val) + case *int32: + return Int32p(key, val) case []int32: return Int32s(key, val) case int16: return Int16(key, val) + case *int16: + return Int16p(key, val) case []int16: return Int16s(key, val) case int8: return Int8(key, val) + case *int8: + return Int8p(key, val) case []int8: return Int8s(key, val) case string: return String(key, val) + case *string: + return Stringp(key, val) case []string: return Strings(key, val) case uint: return Uint(key, val) + case *uint: + return Uintp(key, val) case []uint: return Uints(key, val) case uint64: return Uint64(key, val) + case *uint64: + return Uint64p(key, val) case []uint64: return Uint64s(key, val) case uint32: return Uint32(key, val) + case *uint32: + return Uint32p(key, val) case []uint32: return Uint32s(key, val) case uint16: return Uint16(key, val) + case *uint16: + return Uint16p(key, val) case []uint16: return Uint16s(key, val) case uint8: return Uint8(key, val) + case *uint8: + return Uint8p(key, val) case []byte: return Binary(key, val) case uintptr: return Uintptr(key, val) + case *uintptr: + return Uintptrp(key, val) case []uintptr: return Uintptrs(key, val) case time.Time: return Time(key, val) + case *time.Time: + return Timep(key, val) case []time.Time: return Times(key, val) case time.Duration: return Duration(key, val) + case *time.Duration: + return Durationp(key, val) case []time.Duration: return Durations(key, val) case error: diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock deleted file mode 100644 index 881b462..0000000 --- a/vendor/go.uber.org/zap/glide.lock +++ /dev/null @@ -1,76 +0,0 @@ -hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 -updated: 2017-07-22T18:06:49.598185334-07:00 -imports: -- name: go.uber.org/atomic - version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf -- name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a -testImports: -- name: github.com/apex/log - version: d9b960447bfa720077b2da653cc79e533455b499 - subpackages: - - handlers/json -- name: github.com/axw/gocov - version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 - subpackages: - - gocov -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/fatih/color - version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 -- name: github.com/go-kit/kit - version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d - subpackages: - - log -- name: github.com/go-logfmt/logfmt - version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: 
github.com/go-stack/stack - version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b -- name: github.com/golang/lint - version: c5fb716d6688a859aae56d26d3e6070808df29f7 - subpackages: - - golint -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/mattn/go-colorable - version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a -- name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe -- name: github.com/mattn/goveralls - version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 -- name: github.com/pborman/uuid - version: e790cca94e6cc75c7064b1332e63811d4aae1a53 -- name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/rs/zerolog - version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 -- name: github.com/satori/go.uuid - version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b -- name: github.com/sirupsen/logrus - version: 7dd06bf38e1e13df288d471a57d5adbac106be9e -- name: github.com/stretchr/testify - version: f6abca593680b2315d2075e0f5e2a9751e3f431a - subpackages: - - assert - - require -- name: go.pedge.io/lion - version: 87958e8713f1fa138d993087133b97e976642159 -- name: golang.org/x/sys - version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f - subpackages: - - unix -- name: golang.org/x/tools - version: 496819729719f9d07692195e0a94d6edd2251389 - subpackages: - - cover -- name: gopkg.in/inconshreveable/log15.v2 - version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f - subpackages: - - stack - - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml index 9441259..8e1d05e 100644 --- a/vendor/go.uber.org/zap/glide.yaml +++ b/vendor/go.uber.org/zap/glide.yaml @@ -22,12 +22,11 @@ testImport: - package: github.com/mattn/goveralls - package: github.com/pborman/uuid - package: github.com/pkg/errors -- package: go.pedge.io/lion - package: github.com/rs/zerolog - package: golang.org/x/tools subpackages: - cover -- package: github.com/golang/lint +- package: golang.org/x/lint subpackages: - golint - package: github.com/axw/gocov diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod new file mode 100644 index 0000000..6ef4db7 --- /dev/null +++ b/vendor/go.uber.org/zap/go.mod @@ -0,0 +1,13 @@ +module go.uber.org/zap + +go 1.13 + +require ( + github.com/pkg/errors v0.8.1 + github.com/stretchr/testify v1.4.0 + go.uber.org/atomic v1.6.0 + go.uber.org/multierr v1.5.0 + golang.org/x/lint v0.0.0-20190930215403-16217165b5de + gopkg.in/yaml.v2 v2.2.2 + honnef.co/go/tools v0.0.1-2019.2.3 +) diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum new file mode 100644 index 0000000..99cdb93 --- /dev/null +++ b/vendor/go.uber.org/zap/go.sum @@ -0,0 +1,56 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index dc8f6e3..ea484ae 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -49,6 +49,7 @@ type Logger struct { addStack zapcore.LevelEnabler callerSkip int + onFatal zapcore.CheckWriteAction // default is WriteThenFatal } // New constructs a new Logger from the provided zapcore.Core and Options. If @@ -258,6 +259,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // (e.g., Check, Info, Fatal). const callerSkipOffset = 2 + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + // Create basic checked entry thru the core; this will be non-nil if the // log message will actually be written somewhere. ent := zapcore.Entry{ @@ -274,7 +281,13 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { case zapcore.PanicLevel: ce = ce.Should(ent, zapcore.WriteThenPanic) case zapcore.FatalLevel: - ce = ce.Should(ent, zapcore.WriteThenFatal) + onFatal := log.onFatal + // Noop is the default value for CheckWriteAction, and it leads to + // continued execution after a Fatal which is unexpected. + if onFatal == zapcore.WriteThenNoop { + onFatal = zapcore.WriteThenFatal + } + ce = ce.Should(ent, onFatal) case zapcore.DPanicLevel: if log.development { ce = ce.Should(ent, zapcore.WriteThenPanic) @@ -291,15 +304,41 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Thread the error output through to the CheckedEntry. ce.ErrorOutput = log.errorOutput if log.addCaller { - ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) - if !ce.Entry.Caller.Defined { + frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) + if !defined { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) log.errorOutput.Sync() } + + ce.Entry.Caller = zapcore.EntryCaller{ + Defined: defined, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } } if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = Stack("").String + ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String } return ce } + +// getCallerFrame gets caller frame. 
The argument skip is the number of stack +// frames to ascend, with 0 identifying the caller of getCallerFrame. The +// boolean ok is false if it was not possible to recover the information. +// +// Note: This implementation is similar to runtime.Caller, but it returns the whole frame. +func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { + const skipOffset = 2 // skip getCallerFrame and Callers + + pc := make([]uintptr, 1) + numFrames := runtime.Callers(skip+skipOffset, pc[:]) + if numFrames < 1 { + return + } + + frame, _ = runtime.CallersFrames(pc).Next() + return frame, frame.PC != 0 +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 7a6b0fc..0135c20 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -20,7 +20,11 @@ package zap -import "go.uber.org/zap/zapcore" +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) // An Option configures a Logger. type Option interface { @@ -82,11 +86,18 @@ func Development() Option { }) } -// AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { return optionFunc(func(log *Logger) { - log.addCaller = true + log.addCaller = enabled }) } @@ -107,3 +118,23 @@ func AddStacktrace(lvl zapcore.LevelEnabler) Option { log.addStack = lvl }) } + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// OnFatal sets the action to take on fatal logs. +func OnFatal(action zapcore.CheckWriteAction) Option { + return optionFunc(func(log *Logger) { + log.onFatal = action + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index ff0becf..df46fa8 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -136,7 +136,7 @@ func newFileSink(u *url.URL) (Sink, error) { case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go index 100fac2..0cf8c1d 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -22,28 +22,20 @@ package zap import ( "runtime" - "strings" "sync" "go.uber.org/zap/internal/bufferpool" ) -const _zapPackage = "go.uber.org/zap" - var ( _stacktracePool = sync.Pool{ New: func() interface{} { return newProgramCounters(64) }, } - - // We add "." and "/" suffixes to the package name to ensure we only match - // the exact package and not any package with the same prefix. 
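Note: the options.go hunk above adds WithCaller and IncreaseLevel (plus OnFatal, illustrated further down). A small, hedged sketch of how a child logger might use the first two; the logger setup here is illustrative only:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	defer base.Sync()

	quiet := base.WithOptions(
		zap.WithCaller(false),                // drop caller annotation for this child
		zap.IncreaseLevel(zapcore.WarnLevel), // raise the level floor; it cannot lower it
	)
	quiet.Info("filtered out by IncreaseLevel")
	quiet.Warn("still written")
}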
- _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") - _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) ) -func takeStacktrace() string { +func takeStacktrace(skip int) string { buffer := bufferpool.Get() defer buffer.Free() programCounters := _stacktracePool.Get().(*programCounters) @@ -51,9 +43,9 @@ func takeStacktrace() string { var numFrames int for { - // Skip the call to runtime.Counters and takeStacktrace so that the + // Skip the call to runtime.Callers and takeStacktrace so that the // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(2, programCounters.pcs) + numFrames = runtime.Callers(skip+2, programCounters.pcs) if numFrames < len(programCounters.pcs) { break } @@ -63,19 +55,12 @@ func takeStacktrace() string { } i := 0 - skipZapFrames := true // skip all consecutive zap frames at the beginning. frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) // Note: On the last iteration, frames.Next() returns false, with a valid // frame, but we ignore this frame. The last frame is a a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. for frame, more := frames.Next(); more; frame, more = frames.Next() { - if skipZapFrames && isZapFrame(frame.Function) { - continue - } else { - skipZapFrames = false - } - if i != 0 { buffer.AppendByte('\n') } @@ -91,24 +76,6 @@ func takeStacktrace() string { return buffer.String() } -func isZapFrame(function string) bool { - for _, prefix := range _zapStacktracePrefixes { - if strings.HasPrefix(function, prefix) { - return true - } - } - - // We can't use a prefix match here since the location of the vendor - // directory affects the prefix. Instead we do a contains match. - for _, contains := range _zapStacktraceVendorContains { - if strings.Contains(function, contains) { - return true - } - } - - return false -} - type programCounters struct { pcs []uintptr } @@ -116,11 +83,3 @@ type programCounters struct { func newProgramCounters(size int) *programCounters { return &programCounters{make([]uintptr, size)} } - -func addPrefix(prefix string, ss ...string) []string { - withPrefix := make([]string, len(ss)) - for i, s := range ss { - withPrefix[i] = prefix + s - } - return withPrefix -} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index b787596..3b68f8c 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -56,6 +56,10 @@ type consoleEncoder struct { // encoder configuration, it will omit any element whose key is set to the empty // string. 
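Note: the stacktrace.go hunk above replaces the old zap-frame filtering with an explicit skip count, which is what the new StackSkip field constructor builds on. A minimal sketch of the intended use in a wrapper helper (logWithStack is a hypothetical name, not something this patch adds):

package main

import "go.uber.org/zap"

// logWithStack is a hypothetical helper; StackSkip("stack", 1) skips this
// wrapper's own frame so the captured stacktrace starts at its caller.
func logWithStack(logger *zap.Logger, msg string) {
	logger.Error(msg, zap.StackSkip("stack", 1))
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()
	logWithStack(logger, "something went wrong")
}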
func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if len(cfg.ConsoleSeparator) == 0 { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } return consoleEncoder{newJSONEncoder(cfg, true)} } @@ -89,12 +93,17 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, nameEncoder(ent.LoggerName, arr) } - if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } } for i := range arr.elems { if i > 0 { - line.AppendByte('\t') + line.AppendString(c.ConsoleSeparator) } fmt.Fprint(line, arr.elems[i]) } @@ -102,7 +111,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // Add the message itself. if c.MessageKey != "" { - c.addTabIfNecessary(line) + c.addSeparatorIfNecessary(line) line.AppendString(ent.Message) } @@ -126,7 +135,12 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { context := c.jsonEncoder.Clone().(*jsonEncoder) - defer context.buf.Free() + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() addFields(context, extra) context.closeOpenNamespaces() @@ -134,14 +148,14 @@ func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { return } - c.addTabIfNecessary(line) + c.addSeparatorIfNecessary(line) line.AppendByte('{') line.Write(context.buf.Bytes()) line.AppendByte('}') } -func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { if line.Len() > 0 { - line.AppendByte('\t') + line.AppendString(c.ConsoleSeparator) } } diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index f050952..6601ca1 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -21,6 +21,7 @@ package zapcore import ( + "encoding/json" "time" "go.uber.org/zap/buffer" @@ -31,6 +32,9 @@ import ( // behavior. const DefaultLineEnding = "\n" +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + // A LevelEncoder serializes a Level to a primitive type. type LevelEncoder func(Level, PrimitiveArrayEncoder) @@ -109,17 +113,66 @@ func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { enc.AppendInt64(t.UnixNano()) } +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + // ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string // with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) +} + +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. 
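Note: the console_encoder.go hunk above makes the field delimiter configurable via the new ConsoleSeparator setting, defaulting to the old "\t" when left empty. A hedged sketch of overriding it (the " | " separator is just an example value):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewDevelopmentEncoderConfig()
	encCfg.ConsoleSeparator = " | " // leaving it empty falls back to the old "\t"

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(encCfg),
		zapcore.Lock(os.Stdout),
		zapcore.DebugLevel,
	)
	zap.New(core).Info("custom separator between console fields")
}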
+// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339, enc) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339Nano, enc) } -// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are -// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to -// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } +} + +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. +// Anything else is unmarshaled to EpochTimeEncoder. func (e *TimeEncoder) UnmarshalText(text []byte) error { switch string(text) { + case "rfc3339nano", "RFC3339Nano": + *e = RFC3339NanoTimeEncoder + case "rfc3339", "RFC3339": + *e = RFC3339TimeEncoder case "iso8601", "ISO8601": *e = ISO8601TimeEncoder case "millis": @@ -132,6 +185,35 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error { return nil } +// UnmarshalYAML unmarshals YAML to a TimeEncoder. +// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. +// timeEncoder: +// layout: 06/01/02 03:04pm +// If value is string, it uses UnmarshalText. +// timeEncoder: iso8601 +func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { + var o struct { + Layout string `json:"layout" yaml:"layout"` + } + if err := unmarshal(&o); err == nil { + *e = TimeEncoderOfLayout(o.Layout) + return nil + } + + var s string + if err := unmarshal(&s); err != nil { + return err + } + return e.UnmarshalText([]byte(s)) +} + +// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. +func (e *TimeEncoder) UnmarshalJSON(data []byte) error { + return e.UnmarshalYAML(func(v interface{}) error { + return json.Unmarshal(data, v) + }) +} + // A DurationEncoder serializes a time.Duration to a primitive type. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) @@ -146,6 +228,12 @@ func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { enc.AppendInt64(int64(d)) } +// MillisDurationEncoder serializes a time.Duration to an integer number of +// milliseconds elapsed. +func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(d.Nanoseconds() / 1e6) +} + // StringDurationEncoder serializes a time.Duration using its built-in String // method. 
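Note: the encoder.go hunk above adds RFC3339 time encoders, TimeEncoderOfLayout, and (below) MillisDurationEncoder, all also reachable from text/YAML/JSON config via UnmarshalText ("rfc3339", "ms", a layout object, and so on). A small sketch of setting them programmatically; the layout string is illustrative:

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewProductionEncoderConfig()
	encCfg.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05") // arbitrary layout
	encCfg.EncodeDuration = zapcore.MillisDurationEncoder                  // integer milliseconds

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(encCfg),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	zap.New(core).Info("timed", zap.Duration("elapsed", 1500*time.Millisecond))
}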
func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { @@ -161,6 +249,8 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { *e = StringDurationEncoder case "nanos": *e = NanosDurationEncoder + case "ms": + *e = MillisDurationEncoder default: *e = SecondsDurationEncoder } @@ -227,6 +317,7 @@ type EncoderConfig struct { TimeKey string `json:"timeKey" yaml:"timeKey"` NameKey string `json:"nameKey" yaml:"nameKey"` CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` LineEnding string `json:"lineEnding" yaml:"lineEnding"` // Configure the primitive representations of common complex types. For @@ -239,6 +330,9 @@ type EncoderConfig struct { // Unlike the other primitive type encoders, EncodeName is optional. The // zero value falls back to FullNameEncoder. EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` } // ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a @@ -272,8 +366,8 @@ type ObjectEncoder interface { AddUint8(key string, value uint8) AddUintptr(key string, value uintptr) - // AddReflected uses reflection to serialize arbitrary objects, so it's slow - // and allocation-heavy. + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. AddReflected(key string, value interface{}) error // OpenNamespace opens an isolated namespace where all subsequent fields will // be added. Applications can use namespaces to prevent key collisions when @@ -343,6 +437,7 @@ type Encoder interface { Clone() Encoder // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. + // context, into a byte buffer and returns it. Any fields that are empty, + // including fields on the `Entry` type, should be omitted. EncodeEntry(Entry, []Field) (*buffer.Buffer, error) } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 7d9893f..4aa8b4f 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -22,6 +22,7 @@ package zapcore import ( "fmt" + "runtime" "strings" "sync" "time" @@ -70,10 +71,11 @@ func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { // EntryCaller represents the caller of a logging function. type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int + Defined bool + PC uintptr + File string + Line int + Function string } // String returns the full path and line number of the caller. @@ -136,7 +138,8 @@ func (ec EntryCaller) TrimmedPath() string { // An Entry represents a complete log message. The entry's structured context // is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. // // Entries are pooled, so any functions that accept them MUST be careful not to // retain references to them. @@ -157,6 +160,8 @@ const ( // WriteThenNoop indicates that nothing special needs to be done. It's the // default behavior. WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. 
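Note: together with the OnFatal option added earlier in this diff, the new WriteThenGoexit action lets Fatal unwind the current goroutine instead of calling os.Exit, which is mainly useful in tests. A hedged sketch of that combination (the channel plumbing is illustrative):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger, err := zap.NewProduction(
		zap.OnFatal(zapcore.WriteThenGoexit), // Fatal unwinds the goroutine instead of exiting
	)
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	done := make(chan struct{})
	go func() {
		defer close(done)
		logger.Fatal("fatal entry is still written before Goexit")
	}()
	<-done
}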
+ WriteThenGoexit // WriteThenPanic causes a panic after Write. WriteThenPanic // WriteThenFatal causes a fatal os.Exit after Write. @@ -229,6 +234,8 @@ func (ce *CheckedEntry) Write(fields ...Field) { panic(msg) case WriteThenFatal: exit.Exit() + case WriteThenGoexit: + runtime.Goexit() } } diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index a67c7ba..9ba2272 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -66,11 +66,6 @@ type errorGroup interface { Errors() []error } -type causer interface { - // Provides access to the error that caused this error. - Cause() error -} - // Note that errArry and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index ae772e4..7e255d6 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -65,8 +65,11 @@ const ( Int8Type // StringType indicates that the field carries a string. StringType - // TimeType indicates that the field carries a time.Time. + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType // Uint64Type indicates that the field carries a uint64. Uint64Type // Uint32Type indicates that the field carries a uint32. @@ -145,6 +148,8 @@ func (f Field) AddTo(enc ObjectEncoder) { // Fall back to UTC if location is nil. enc.AddTime(f.Key, time.Unix(0, f.Integer)) } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) case Uint64Type: enc.AddUint64(f.Key, uint64(f.Integer)) case Uint32Type: @@ -200,13 +205,23 @@ func addFields(enc ObjectEncoder, fields []Field) { } } -func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the String() method, similar to https://golang.org/src/fmt/print.go#L540 defer func() { - if v := recover(); v != nil { - err = fmt.Errorf("PANIC=%v", v) + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", err) } }() enc.AddString(key, stringer.(fmt.Stringer).String()) - return + return nil } diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 0000000..5a17492 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,66 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 9aec4ea..5cf7d91 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -145,15 +145,29 @@ func (enc *jsonEncoder) resetReflectBuf() { } } -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } enc.resetReflectBuf() - err := enc.reflectEnc.Encode(obj) + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) if err != nil { return err } - enc.reflectBuf.TrimNewline() enc.addKey(key) - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + _, err = enc.buf.Write(valueBytes) return err } @@ -222,7 +236,9 @@ func (enc *jsonEncoder) AppendComplex128(val complex128) { func (enc *jsonEncoder) AppendDuration(val time.Duration) { cur := enc.buf.Len() - enc.EncodeDuration(val, enc) + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } if cur == enc.buf.Len() { // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep // JSON valid. @@ -236,14 +252,12 @@ func (enc *jsonEncoder) AppendInt64(val int64) { } func (enc *jsonEncoder) AppendReflected(val interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(val) + valueBytes, err := enc.encodeReflected(val) if err != nil { return err } - enc.reflectBuf.TrimNewline() enc.addElementSeparator() - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + _, err = enc.buf.Write(valueBytes) return err } @@ -254,9 +268,18 @@ func (enc *jsonEncoder) AppendString(val string) { enc.buf.AppendByte('"') } +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + func (enc *jsonEncoder) AppendTime(val time.Time) { cur := enc.buf.Len() - enc.EncodeTime(val, enc) + if e := enc.EncodeTime; e != nil { + e(val, enc) + } if cur == enc.buf.Len() { // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep // output JSON valid. @@ -343,14 +366,20 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.LoggerName) } } - if ent.Caller.Defined && final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) } } if final.MessageKey != "" { diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go index 2627a65..c3c55ba 100644 --- a/vendor/go.uber.org/zap/zapcore/marshaler.go +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -23,6 +23,10 @@ package zapcore // ObjectMarshaler allows user-defined types to efficiently add themselves to the // logging context, and to selectively omit information which shouldn't be // included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. 
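Note: the marshaler.go comments below clarify that ObjectMarshaler only applies to zap.Object and direct zap.Any arguments, not to reflection-based encoding. For context, a minimal sketch of an ObjectMarshaler implementation (the user type is illustrative, not part of this patch):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// user is an illustrative type; MarshalLogObject lets it control exactly which
// fields reach the log, without reflection.
type user struct {
	Name     string
	Password string
}

func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name) // Password deliberately omitted
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()
	logger.Info("login", zap.Object("user", user{Name: "gopher", Password: "hunter2"}))
}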
It is not used when reflection-based +// encoding is used. type ObjectMarshaler interface { MarshalLogObject(ObjectEncoder) error } @@ -39,6 +43,10 @@ func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { // ArrayMarshaler allows user-defined types to efficiently add themselves to the // logging context, and to selectively omit information which shouldn't be // included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. type ArrayMarshaler interface { MarshalLogArray(ArrayEncoder) error } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index e316418..25f10ca 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -81,33 +81,104 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { return 1 } -type sampler struct { - Core +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 - counts *counters - tick time.Duration - first, thereafter uint64 +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) } -// NewSampler creates a Core that samples incoming entries, which caps the CPU -// and I/O load of logging while attempting to preserve a representative subset -// of your logs. +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. // // Zap samples by logging the first N entries with a given level and message // each tick. If more Entries with the same level and message are seen during // the same interval, every Mth message is logged and the rest are dropped. // +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// // Keep in mind that zap's sampling implementation is optimized for speed over // absolute precision; under load, each tick may be slightly over- or // under-sampled. 
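Note: the sampler.go hunk below replaces NewSampler with NewSamplerWithOptions and adds SamplerHook for observing drop/sample decisions; the Config-level Hook shown earlier is wired through this constructor. A hedged core-level sketch, with illustrative tick and rate values:

package main

import (
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 5, 100,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				atomic.AddInt64(&dropped, 1)
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 1000; i++ {
		logger.Info("identical message") // first 5 per tick, then every 100th
	}
	logger.Info("sampler stats", zap.Int64("dropped", atomic.LoadInt64(&dropped)))
	logger.Sync()
}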
-func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return &sampler{ +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ Core: core, tick: tick, counts: newCounters(), first: uint64(first), thereafter: uint64(thereafter), + hook: nopSamplingHook, } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) } func (s *sampler) With(fields []Field) Core { @@ -117,6 +188,7 @@ func (s *sampler) With(fields []Field) Core { counts: s.counts, first: s.first, thereafter: s.thereafter, + hook: s.hook, } } @@ -128,7 +200,9 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { counter := s.counts.get(ent.Level, ent.Message) n := counter.IncCheckReset(ent.Time, s.tick) if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) return ce } + s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/golang.org/x/image/AUTHORS b/vendor/golang.org/x/image/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/image/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/image/CONTRIBUTORS b/vendor/golang.org/x/image/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/image/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/image/LICENSE b/vendor/golang.org/x/image/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/image/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/image/PATENTS b/vendor/golang.org/x/image/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/image/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/image/bmp/reader.go b/vendor/golang.org/x/image/bmp/reader.go new file mode 100644 index 0000000..52e2520 --- /dev/null +++ b/vendor/golang.org/x/image/bmp/reader.go @@ -0,0 +1,219 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bmp implements a BMP image decoder and encoder. +// +// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html. +package bmp // import "golang.org/x/image/bmp" + +import ( + "errors" + "image" + "image/color" + "io" +) + +// ErrUnsupported means that the input BMP image uses a valid but unsupported +// feature. +var ErrUnsupported = errors.New("bmp: unsupported BMP image") + +func readUint16(b []byte) uint16 { + return uint16(b[0]) | uint16(b[1])<<8 +} + +func readUint32(b []byte) uint32 { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +// decodePaletted reads an 8 bit-per-pixel BMP image from r. +// If topDown is false, the image rows will be read bottom-up. 
+func decodePaletted(r io.Reader, c image.Config, topDown bool) (image.Image, error) { + paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette)) + if c.Width == 0 || c.Height == 0 { + return paletted, nil + } + var tmp [4]byte + y0, y1, yDelta := c.Height-1, -1, -1 + if topDown { + y0, y1, yDelta = 0, c.Height, +1 + } + for y := y0; y != y1; y += yDelta { + p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width] + if _, err := io.ReadFull(r, p); err != nil { + return nil, err + } + // Each row is 4-byte aligned. + if c.Width%4 != 0 { + _, err := io.ReadFull(r, tmp[:4-c.Width%4]) + if err != nil { + return nil, err + } + } + } + return paletted, nil +} + +// decodeRGB reads a 24 bit-per-pixel BMP image from r. +// If topDown is false, the image rows will be read bottom-up. +func decodeRGB(r io.Reader, c image.Config, topDown bool) (image.Image, error) { + rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height)) + if c.Width == 0 || c.Height == 0 { + return rgba, nil + } + // There are 3 bytes per pixel, and each row is 4-byte aligned. + b := make([]byte, (3*c.Width+3)&^3) + y0, y1, yDelta := c.Height-1, -1, -1 + if topDown { + y0, y1, yDelta = 0, c.Height, +1 + } + for y := y0; y != y1; y += yDelta { + if _, err := io.ReadFull(r, b); err != nil { + return nil, err + } + p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4] + for i, j := 0, 0; i < len(p); i, j = i+4, j+3 { + // BMP images are stored in BGR order rather than RGB order. + p[i+0] = b[j+2] + p[i+1] = b[j+1] + p[i+2] = b[j+0] + p[i+3] = 0xFF + } + } + return rgba, nil +} + +// decodeNRGBA reads a 32 bit-per-pixel BMP image from r. +// If topDown is false, the image rows will be read bottom-up. +func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error) { + rgba := image.NewNRGBA(image.Rect(0, 0, c.Width, c.Height)) + if c.Width == 0 || c.Height == 0 { + return rgba, nil + } + y0, y1, yDelta := c.Height-1, -1, -1 + if topDown { + y0, y1, yDelta = 0, c.Height, +1 + } + for y := y0; y != y1; y += yDelta { + p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4] + if _, err := io.ReadFull(r, p); err != nil { + return nil, err + } + for i := 0; i < len(p); i += 4 { + // BMP images are stored in BGRA order rather than RGBA order. + p[i+0], p[i+2] = p[i+2], p[i+0] + } + } + return rgba, nil +} + +// Decode reads a BMP image from r and returns it as an image.Image. +// Limitation: The file must be 8, 24 or 32 bits per pixel. +func Decode(r io.Reader) (image.Image, error) { + c, bpp, topDown, err := decodeConfig(r) + if err != nil { + return nil, err + } + switch bpp { + case 8: + return decodePaletted(r, c, topDown) + case 24: + return decodeRGB(r, c, topDown) + case 32: + return decodeNRGBA(r, c, topDown) + } + panic("unreachable") +} + +// DecodeConfig returns the color model and dimensions of a BMP image without +// decoding the entire image. +// Limitation: The file must be 8, 24 or 32 bits per pixel. +func DecodeConfig(r io.Reader) (image.Config, error) { + config, _, _, err := decodeConfig(r) + return config, err +} + +func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, err error) { + // We only support those BMP images that are a BITMAPFILEHEADER + // immediately followed by a BITMAPINFOHEADER. 
+ const ( + fileHeaderLen = 14 + infoHeaderLen = 40 + v4InfoHeaderLen = 108 + v5InfoHeaderLen = 124 + ) + var b [1024]byte + if _, err := io.ReadFull(r, b[:fileHeaderLen+4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return image.Config{}, 0, false, err + } + if string(b[:2]) != "BM" { + return image.Config{}, 0, false, errors.New("bmp: invalid format") + } + offset := readUint32(b[10:14]) + infoLen := readUint32(b[14:18]) + if infoLen != infoHeaderLen && infoLen != v4InfoHeaderLen && infoLen != v5InfoHeaderLen { + return image.Config{}, 0, false, ErrUnsupported + } + if _, err := io.ReadFull(r, b[fileHeaderLen+4:fileHeaderLen+infoLen]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return image.Config{}, 0, false, err + } + width := int(int32(readUint32(b[18:22]))) + height := int(int32(readUint32(b[22:26]))) + if height < 0 { + height, topDown = -height, true + } + if width < 0 || height < 0 { + return image.Config{}, 0, false, ErrUnsupported + } + // We only support 1 plane and 8, 24 or 32 bits per pixel and no + // compression. + planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34]) + // if compression is set to BITFIELDS, but the bitmask is set to the default bitmask + // that would be used if compression was set to 0, we can continue as if compression was 0 + if compression == 3 && infoLen > infoHeaderLen && + readUint32(b[54:58]) == 0xff0000 && readUint32(b[58:62]) == 0xff00 && + readUint32(b[62:66]) == 0xff && readUint32(b[66:70]) == 0xff000000 { + compression = 0 + } + if planes != 1 || compression != 0 { + return image.Config{}, 0, false, ErrUnsupported + } + switch bpp { + case 8: + if offset != fileHeaderLen+infoLen+256*4 { + return image.Config{}, 0, false, ErrUnsupported + } + _, err = io.ReadFull(r, b[:256*4]) + if err != nil { + return image.Config{}, 0, false, err + } + pcm := make(color.Palette, 256) + for i := range pcm { + // BMP images are stored in BGR order rather than RGB order. + // Every 4th byte is padding. + pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF} + } + return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, nil + case 24: + if offset != fileHeaderLen+infoLen { + return image.Config{}, 0, false, ErrUnsupported + } + return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, nil + case 32: + if offset != fileHeaderLen+infoLen { + return image.Config{}, 0, false, ErrUnsupported + } + return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, nil + } + return image.Config{}, 0, false, ErrUnsupported +} + +func init() { + image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig) +} diff --git a/vendor/golang.org/x/image/bmp/writer.go b/vendor/golang.org/x/image/bmp/writer.go new file mode 100644 index 0000000..f07b39d --- /dev/null +++ b/vendor/golang.org/x/image/bmp/writer.go @@ -0,0 +1,262 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bmp + +import ( + "encoding/binary" + "errors" + "image" + "io" +) + +type header struct { + sigBM [2]byte + fileSize uint32 + resverved [2]uint16 + pixOffset uint32 + dibHeaderSize uint32 + width uint32 + height uint32 + colorPlane uint16 + bpp uint16 + compression uint32 + imageSize uint32 + xPixelsPerMeter uint32 + yPixelsPerMeter uint32 + colorUse uint32 + colorImportant uint32 +} + +func encodePaletted(w io.Writer, pix []uint8, dx, dy, stride, step int) error { + var padding []byte + if dx < step { + padding = make([]byte, step-dx) + } + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx + if _, err := w.Write(pix[min:max]); err != nil { + return err + } + if padding != nil { + if _, err := w.Write(padding); err != nil { + return err + } + } + } + return nil +} + +func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error { + buf := make([]byte, step) + if opaque { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } else { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + a := uint32(pix[i+3]) + if a == 0 { + buf[off+2] = 0 + buf[off+1] = 0 + buf[off+0] = 0 + buf[off+3] = 0 + off += 4 + continue + } else if a == 0xff { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + buf[off+3] = 0xff + off += 4 + continue + } + buf[off+2] = uint8(((uint32(pix[i+0]) * 0xffff) / a) >> 8) + buf[off+1] = uint8(((uint32(pix[i+1]) * 0xffff) / a) >> 8) + buf[off+0] = uint8(((uint32(pix[i+2]) * 0xffff) / a) >> 8) + buf[off+3] = uint8(a) + off += 4 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } + return nil +} + +func encodeNRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error { + buf := make([]byte, step) + if opaque { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } else { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + buf[off+3] = pix[i+3] + off += 4 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } + return nil +} + +func encode(w io.Writer, m image.Image, step int) error { + b := m.Bounds() + buf := make([]byte, step) + for y := b.Max.Y - 1; y >= b.Min.Y; y-- { + off := 0 + for x := b.Min.X; x < b.Max.X; x++ { + r, g, b, _ := m.At(x, y).RGBA() + buf[off+2] = byte(r >> 8) + buf[off+1] = byte(g >> 8) + buf[off+0] = byte(b >> 8) + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +// Encode writes the image m to w in BMP format. 
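Before the Encode implementation that follows, a minimal sketch of the package-level round trip through bmp.Decode and bmp.Encode (file names are placeholders). Note that the init function earlier in reader.go also registers the format, so image.Decode recognises BMP data once this package is imported.

package main

import (
	"log"
	"os"

	"golang.org/x/image/bmp"
)

func main() {
	in, err := os.Open("input.bmp")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	img, err := bmp.Decode(in) // 8, 24 or 32 bits per pixel only
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("output.bmp")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if err := bmp.Encode(out, img); err != nil {
		log.Fatal(err)
	}
}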
+func Encode(w io.Writer, m image.Image) error { + d := m.Bounds().Size() + if d.X < 0 || d.Y < 0 { + return errors.New("bmp: negative bounds") + } + h := &header{ + sigBM: [2]byte{'B', 'M'}, + fileSize: 14 + 40, + pixOffset: 14 + 40, + dibHeaderSize: 40, + width: uint32(d.X), + height: uint32(d.Y), + colorPlane: 1, + } + + var step int + var palette []byte + var opaque bool + switch m := m.(type) { + case *image.Gray: + step = (d.X + 3) &^ 3 + palette = make([]byte, 1024) + for i := 0; i < 256; i++ { + palette[i*4+0] = uint8(i) + palette[i*4+1] = uint8(i) + palette[i*4+2] = uint8(i) + palette[i*4+3] = 0xFF + } + h.imageSize = uint32(d.Y * step) + h.fileSize += uint32(len(palette)) + h.imageSize + h.pixOffset += uint32(len(palette)) + h.bpp = 8 + + case *image.Paletted: + step = (d.X + 3) &^ 3 + palette = make([]byte, 1024) + for i := 0; i < len(m.Palette) && i < 256; i++ { + r, g, b, _ := m.Palette[i].RGBA() + palette[i*4+0] = uint8(b >> 8) + palette[i*4+1] = uint8(g >> 8) + palette[i*4+2] = uint8(r >> 8) + palette[i*4+3] = 0xFF + } + h.imageSize = uint32(d.Y * step) + h.fileSize += uint32(len(palette)) + h.imageSize + h.pixOffset += uint32(len(palette)) + h.bpp = 8 + case *image.RGBA: + opaque = m.Opaque() + if opaque { + step = (3*d.X + 3) &^ 3 + h.bpp = 24 + } else { + step = 4 * d.X + h.bpp = 32 + } + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + case *image.NRGBA: + opaque = m.Opaque() + if opaque { + step = (3*d.X + 3) &^ 3 + h.bpp = 24 + } else { + step = 4 * d.X + h.bpp = 32 + } + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + default: + step = (3*d.X + 3) &^ 3 + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + h.bpp = 24 + } + + if err := binary.Write(w, binary.LittleEndian, h); err != nil { + return err + } + if palette != nil { + if err := binary.Write(w, binary.LittleEndian, palette); err != nil { + return err + } + } + + if d.X == 0 || d.Y == 0 { + return nil + } + + switch m := m.(type) { + case *image.Gray: + return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step) + case *image.Paletted: + return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step) + case *image.RGBA: + return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque) + case *image.NRGBA: + return encodeNRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque) + } + return encode(w, m, step) +} diff --git a/vendor/golang.org/x/image/ccitt/reader.go b/vendor/golang.org/x/image/ccitt/reader.go new file mode 100644 index 0000000..340de05 --- /dev/null +++ b/vendor/golang.org/x/image/ccitt/reader.go @@ -0,0 +1,795 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package ccitt implements a CCITT (fax) image decoder. 
+package ccitt + +import ( + "encoding/binary" + "errors" + "image" + "io" + "math/bits" +) + +var ( + errIncompleteCode = errors.New("ccitt: incomplete code") + errInvalidBounds = errors.New("ccitt: invalid bounds") + errInvalidCode = errors.New("ccitt: invalid code") + errInvalidMode = errors.New("ccitt: invalid mode") + errInvalidOffset = errors.New("ccitt: invalid offset") + errMissingEOL = errors.New("ccitt: missing End-of-Line") + errRunLengthOverflowsWidth = errors.New("ccitt: run length overflows width") + errRunLengthTooLong = errors.New("ccitt: run length too long") + errUnsupportedMode = errors.New("ccitt: unsupported mode") + errUnsupportedSubFormat = errors.New("ccitt: unsupported sub-format") + errUnsupportedWidth = errors.New("ccitt: unsupported width") +) + +// Order specifies the bit ordering in a CCITT data stream. +type Order uint32 + +const ( + // LSB means Least Significant Bits first. + LSB Order = iota + // MSB means Most Significant Bits first. + MSB +) + +// SubFormat represents that the CCITT format consists of a number of +// sub-formats. Decoding or encoding a CCITT data stream requires knowing the +// sub-format context. It is not represented in the data stream per se. +type SubFormat uint32 + +const ( + Group3 SubFormat = iota + Group4 +) + +// AutoDetectHeight is passed as the height argument to NewReader to indicate +// that the image height (the number of rows) is not known in advance. +const AutoDetectHeight = -1 + +// Options are optional parameters. +type Options struct { + // Align means that some variable-bit-width codes are byte-aligned. + Align bool + // Invert means that black is the 1 bit or 0xFF byte, and white is 0. + Invert bool +} + +// maxWidth is the maximum (inclusive) supported width. This is a limitation of +// this implementation, to guard against integer overflow, and not anything +// inherent to the CCITT format. +const maxWidth = 1 << 20 + +func invertBytes(b []byte) { + for i, c := range b { + b[i] = ^c + } +} + +func reverseBitsWithinBytes(b []byte) { + for i, c := range b { + b[i] = bits.Reverse8(c) + } +} + +// highBits writes to dst (1 bit per pixel, most significant bit first) the +// high (0x80) bits from src (1 byte per pixel). It returns the number of bytes +// written and read such that dst[:d] is the packed form of src[:s]. +// +// For example, if src starts with the 8 bytes [0x7D, 0x7E, 0x7F, 0x80, 0x81, +// 0x82, 0x00, 0xFF] then 0x1D will be written to dst[0]. +// +// If src has (8 * len(dst)) or more bytes then only len(dst) bytes are +// written, (8 * len(dst)) bytes are read, and invert is ignored. +// +// Otherwise, if len(src) is not a multiple of 8 then the final byte written to +// dst is padded with 1 bits (if invert is true) or 0 bits. If inverted, the 1s +// are typically temporary, e.g. they will be flipped back to 0s by an +// invertBytes call in the highBits caller, reader.Read. +func highBits(dst []byte, src []byte, invert bool) (d int, s int) { + // Pack as many complete groups of 8 src bytes as we can. + n := len(src) / 8 + if n > len(dst) { + n = len(dst) + } + dstN := dst[:n] + for i := range dstN { + src8 := src[i*8 : i*8+8] + dstN[i] = ((src8[0] & 0x80) >> 0) | + ((src8[1] & 0x80) >> 1) | + ((src8[2] & 0x80) >> 2) | + ((src8[3] & 0x80) >> 3) | + ((src8[4] & 0x80) >> 4) | + ((src8[5] & 0x80) >> 5) | + ((src8[6] & 0x80) >> 6) | + ((src8[7] & 0x80) >> 7) + } + d, s = n, 8*n + dst, src = dst[d:], src[s:] + + // Pack up to 7 remaining src bytes, if there's room in dst. 
+ if (len(dst) > 0) && (len(src) > 0) { + dstByte := byte(0) + if invert { + dstByte = 0xFF >> uint(len(src)) + } + for n, srcByte := range src { + dstByte |= (srcByte & 0x80) >> uint(n) + } + dst[0] = dstByte + d, s = d+1, s+len(src) + } + return d, s +} + +type bitReader struct { + r io.Reader + + // readErr is the error returned from the most recent r.Read call. As the + // io.Reader documentation says, when r.Read returns (n, err), "always + // process the n > 0 bytes returned before considering the error err". + readErr error + + // order is whether to process r's bytes LSB first or MSB first. + order Order + + // The high nBits bits of the bits field hold upcoming bits in MSB order. + bits uint64 + nBits uint32 + + // bytes[br:bw] holds bytes read from r but not yet loaded into bits. + br uint32 + bw uint32 + bytes [1024]uint8 +} + +func (b *bitReader) alignToByteBoundary() { + n := b.nBits & 7 + b.bits <<= n + b.nBits -= n +} + +// nextBitMaxNBits is the maximum possible value of bitReader.nBits after a +// bitReader.nextBit call, provided that bitReader.nBits was not more than this +// value before that call. +// +// Note that the decode function can unread bits, which can temporarily set the +// bitReader.nBits value above nextBitMaxNBits. +const nextBitMaxNBits = 31 + +func (b *bitReader) nextBit() (uint64, error) { + for { + if b.nBits > 0 { + bit := b.bits >> 63 + b.bits <<= 1 + b.nBits-- + return bit, nil + } + + if available := b.bw - b.br; available >= 4 { + // Read 32 bits, even though b.bits is a uint64, since the decode + // function may need to unread up to maxCodeLength bits, putting + // them back in the remaining (64 - 32) bits. TestMaxCodeLength + // checks that the generated maxCodeLength constant fits. + // + // If changing the Uint32 call, also change nextBitMaxNBits. + b.bits = uint64(binary.BigEndian.Uint32(b.bytes[b.br:])) << 32 + b.br += 4 + b.nBits = 32 + continue + } else if available > 0 { + b.bits = uint64(b.bytes[b.br]) << (7 * 8) + b.br++ + b.nBits = 8 + continue + } + + if b.readErr != nil { + return 0, b.readErr + } + + n, err := b.r.Read(b.bytes[:]) + b.br = 0 + b.bw = uint32(n) + b.readErr = err + + if b.order != MSB { + reverseBitsWithinBytes(b.bytes[:b.bw]) + } + } +} + +func decode(b *bitReader, decodeTable [][2]int16) (uint32, error) { + nBitsRead, bitsRead, state := uint32(0), uint64(0), int32(1) + for { + bit, err := b.nextBit() + if err != nil { + if err == io.EOF { + err = errIncompleteCode + } + return 0, err + } + bitsRead |= bit << (63 - nBitsRead) + nBitsRead++ + + // The "&1" is redundant, but can eliminate a bounds check. + state = int32(decodeTable[state][bit&1]) + if state < 0 { + return uint32(^state), nil + } else if state == 0 { + // Unread the bits we've read, then return errInvalidCode. + b.bits = (b.bits >> nBitsRead) | bitsRead + b.nBits += nBitsRead + return 0, errInvalidCode + } + } +} + +// decodeEOL decodes the 12-bit EOL code 0000_0000_0001. +func decodeEOL(b *bitReader) error { + nBitsRead, bitsRead := uint32(0), uint64(0) + for { + bit, err := b.nextBit() + if err != nil { + if err == io.EOF { + err = errMissingEOL + } + return err + } + bitsRead |= bit << (63 - nBitsRead) + nBitsRead++ + + if nBitsRead < 12 { + if bit&1 == 0 { + continue + } + } else if bit&1 != 0 { + return nil + } + + // Unread the bits we've read, then return errMissingEOL. 
+ b.bits = (b.bits >> nBitsRead) | bitsRead + b.nBits += nBitsRead + return errMissingEOL + } +} + +type reader struct { + br bitReader + subFormat SubFormat + + // width is the image width in pixels. + width int + + // rowsRemaining starts at the image height in pixels, when the reader is + // driven through the io.Reader interface, and decrements to zero as rows + // are decoded. Alternatively, it may be negative if the image height is + // not known in advance at the time of the NewReader call. + // + // When driven through DecodeIntoGray, this field is unused. + rowsRemaining int + + // curr and prev hold the current and previous rows. Each element is either + // 0x00 (black) or 0xFF (white). + // + // prev may be nil, when processing the first row. + curr []byte + prev []byte + + // ri is the read index. curr[:ri] are those bytes of curr that have been + // passed along via the Read method. + // + // When the reader is driven through DecodeIntoGray, instead of through the + // io.Reader interface, this field is unused. + ri int + + // wi is the write index. curr[:wi] are those bytes of curr that have + // already been decoded via the decodeRow method. + // + // What this implementation calls wi is roughly equivalent to what the spec + // calls the a0 index. + wi int + + // These fields are copied from the *Options (which may be nil). + align bool + invert bool + + // atStartOfRow is whether we have just started the row. Some parts of the + // spec say to treat this situation as if "wi = -1". + atStartOfRow bool + + // penColorIsWhite is whether the next run is black or white. + penColorIsWhite bool + + // seenStartOfImage is whether we've called the startDecode method. + seenStartOfImage bool + + // truncated is whether the input is missing the final 6 consecutive EOL's + // (for Group3) or 2 consecutive EOL's (for Group4). Omitting that trailer + // (but otherwise padding to a byte boundary, with either all 0 bits or all + // 1 bits) is invalid according to the spec, but happens in practice when + // exporting from Adobe Acrobat to TIFF + CCITT. This package silently + // ignores the format error for CCITT input that has been truncated in that + // fashion, returning the full decoded image. + // + // Detecting trailer truncation (just after the final row of pixels) + // requires knowing which row is the final row, and therefore does not + // trigger if the image height is not known in advance. + truncated bool + + // readErr is a sticky error for the Read method. + readErr error +} + +func (z *reader) Read(p []byte) (int, error) { + if z.readErr != nil { + return 0, z.readErr + } + originalP := p + + for len(p) > 0 { + // Allocate buffers (and decode any start-of-image codes), if + // processing the first or second row. + if z.curr == nil { + if !z.seenStartOfImage { + if z.readErr = z.startDecode(); z.readErr != nil { + break + } + z.atStartOfRow = true + } + z.curr = make([]byte, z.width) + } + + // Decode the next row, if necessary. + if z.atStartOfRow { + if z.rowsRemaining < 0 { + // We do not know the image height in advance. See if the next + // code is an EOL. If it is, it is consumed. If it isn't, the + // bitReader shouldn't advance along the bit stream, and we + // simply decode another row of pixel data. + // + // For the Group4 subFormat, we may need to align to a byte + // boundary. For the Group3 subFormat, the previous z.decodeRow + // call (or z.startDecode call) has already consumed one of the + // 6 consecutive EOL's. 
The next EOL is actually the second of + // 6, in the middle, and we shouldn't align at that point. + if z.align && (z.subFormat == Group4) { + z.br.alignToByteBoundary() + } + + if err := z.decodeEOL(); err == errMissingEOL { + // No-op. It's another row of pixel data. + } else if err != nil { + z.readErr = err + break + } else { + if z.readErr = z.finishDecode(true); z.readErr != nil { + break + } + z.readErr = io.EOF + break + } + + } else if z.rowsRemaining == 0 { + // We do know the image height in advance, and we have already + // decoded exactly that many rows. + if z.readErr = z.finishDecode(false); z.readErr != nil { + break + } + z.readErr = io.EOF + break + + } else { + z.rowsRemaining-- + } + + if z.readErr = z.decodeRow(z.rowsRemaining == 0); z.readErr != nil { + break + } + } + + // Pack from z.curr (1 byte per pixel) to p (1 bit per pixel). + packD, packS := highBits(p, z.curr[z.ri:], z.invert) + p = p[packD:] + z.ri += packS + + // Prepare to decode the next row, if necessary. + if z.ri == len(z.curr) { + z.ri, z.curr, z.prev = 0, z.prev, z.curr + z.atStartOfRow = true + } + } + + n := len(originalP) - len(p) + if z.invert { + invertBytes(originalP[:n]) + } + return n, z.readErr +} + +func (z *reader) penColor() byte { + if z.penColorIsWhite { + return 0xFF + } + return 0x00 +} + +func (z *reader) startDecode() error { + switch z.subFormat { + case Group3: + if err := z.decodeEOL(); err != nil { + return err + } + + case Group4: + // No-op. + + default: + return errUnsupportedSubFormat + } + + z.seenStartOfImage = true + return nil +} + +func (z *reader) finishDecode(alreadySeenEOL bool) error { + numberOfEOLs := 0 + switch z.subFormat { + case Group3: + if z.truncated { + return nil + } + // The stream ends with a RTC (Return To Control) of 6 consecutive + // EOL's, but we should have already just seen an EOL, either in + // z.startDecode (for a zero-height image) or in z.decodeRow. + numberOfEOLs = 5 + + case Group4: + autoDetectHeight := z.rowsRemaining < 0 + if autoDetectHeight { + // Aligning to a byte boundary was already handled by reader.Read. + } else if z.align { + z.br.alignToByteBoundary() + } + // The stream ends with two EOL's. If the first one is missing, and we + // had an explicit image height, we just assume that the trailing two + // EOL's were truncated and return a nil error. 
+ if err := z.decodeEOL(); err != nil { + if (err == errMissingEOL) && !autoDetectHeight { + z.truncated = true + return nil + } + return err + } + numberOfEOLs = 1 + + default: + return errUnsupportedSubFormat + } + + if alreadySeenEOL { + numberOfEOLs-- + } + for ; numberOfEOLs > 0; numberOfEOLs-- { + if err := z.decodeEOL(); err != nil { + return err + } + } + return nil +} + +func (z *reader) decodeEOL() error { + return decodeEOL(&z.br) +} + +func (z *reader) decodeRow(finalRow bool) error { + z.wi = 0 + z.atStartOfRow = true + z.penColorIsWhite = true + + if z.align { + z.br.alignToByteBoundary() + } + + switch z.subFormat { + case Group3: + for ; z.wi < len(z.curr); z.atStartOfRow = false { + if err := z.decodeRun(); err != nil { + return err + } + } + err := z.decodeEOL() + if finalRow && (err == errMissingEOL) { + z.truncated = true + return nil + } + return err + + case Group4: + for ; z.wi < len(z.curr); z.atStartOfRow = false { + mode, err := decode(&z.br, modeDecodeTable[:]) + if err != nil { + return err + } + rm := readerMode{} + if mode < uint32(len(readerModes)) { + rm = readerModes[mode] + } + if rm.function == nil { + return errInvalidMode + } + if err := rm.function(z, rm.arg); err != nil { + return err + } + } + return nil + } + + return errUnsupportedSubFormat +} + +func (z *reader) decodeRun() error { + table := blackDecodeTable[:] + if z.penColorIsWhite { + table = whiteDecodeTable[:] + } + + total := 0 + for { + n, err := decode(&z.br, table) + if err != nil { + return err + } + if n > maxWidth { + panic("unreachable") + } + total += int(n) + if total > maxWidth { + return errRunLengthTooLong + } + // Anything 0x3F or below is a terminal code. + if n <= 0x3F { + break + } + } + + if total > (len(z.curr) - z.wi) { + return errRunLengthOverflowsWidth + } + dst := z.curr[z.wi : z.wi+total] + penColor := z.penColor() + for i := range dst { + dst[i] = penColor + } + z.wi += total + z.penColorIsWhite = !z.penColorIsWhite + + return nil +} + +// The various modes' semantics are based on determining a row of pixels' +// "changing elements": those pixels whose color differs from the one on its +// immediate left. +// +// The row above the first row is implicitly all white. Similarly, the column +// to the left of the first column is implicitly all white. +// +// For example, here's Figure 1 in "ITU-T Recommendation T.6", where the +// current and previous rows contain black (B) and white (w) pixels. The a? +// indexes point into curr, the b? indexes point into prev. +// +// b1 b2 +// v v +// prev: BBBBBwwwwwBBBwwwww +// curr: BBBwwwwwBBBBBBwwww +// ^ ^ ^ +// a0 a1 a2 +// +// a0 is the "reference element" or current decoder position, roughly +// equivalent to what this implementation calls reader.wi. +// +// a1 is the next changing element to the right of a0, on the "coding line" +// (the current row). +// +// a2 is the next changing element to the right of a1, again on curr. +// +// b1 is the first changing element on the "reference line" (the previous row) +// to the right of a0 and of opposite color to a0. +// +// b2 is the next changing element to the right of b1, again on prev. +// +// The various modes calculate a1 (and a2, for modeH): +// - modePass calculates that a1 is at or to the right of b2. +// - modeH calculates a1 and a2 without considering b1 or b2. +// - modeV* calculates a1 to be b1 plus an adjustment (between -3 and +3). + +const ( + findB1 = false + findB2 = true +) + +// findB finds either the b1 or b2 value. 
+func (z *reader) findB(whichB bool) int { + // The initial row is a special case. The previous row is implicitly all + // white, so that there are no changing pixel elements. We return b1 or b2 + // to be at the end of the row. + if len(z.prev) != len(z.curr) { + return len(z.curr) + } + + i := z.wi + + if z.atStartOfRow { + // a0 is implicitly at -1, on a white pixel. b1 is the first black + // pixel in the previous row. b2 is the first white pixel after that. + for ; (i < len(z.prev)) && (z.prev[i] == 0xFF); i++ { + } + if whichB == findB2 { + for ; (i < len(z.prev)) && (z.prev[i] == 0x00); i++ { + } + } + return i + } + + // As per figure 1 above, assume that the current pen color is white. + // First, walk past every contiguous black pixel in prev, starting at a0. + oppositeColor := ^z.penColor() + for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ { + } + + // Then walk past every contiguous white pixel. + penColor := ^oppositeColor + for ; (i < len(z.prev)) && (z.prev[i] == penColor); i++ { + } + + // We're now at a black pixel (or at the end of the row). That's b1. + if whichB == findB2 { + // If we're looking for b2, walk past every contiguous black pixel + // again. + oppositeColor := ^penColor + for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ { + } + } + + return i +} + +type readerMode struct { + function func(z *reader, arg int) error + arg int +} + +var readerModes = [...]readerMode{ + modePass: {function: readerModePass}, + modeH: {function: readerModeH}, + modeV0: {function: readerModeV, arg: +0}, + modeVR1: {function: readerModeV, arg: +1}, + modeVR2: {function: readerModeV, arg: +2}, + modeVR3: {function: readerModeV, arg: +3}, + modeVL1: {function: readerModeV, arg: -1}, + modeVL2: {function: readerModeV, arg: -2}, + modeVL3: {function: readerModeV, arg: -3}, + modeExt: {function: readerModeExt}, +} + +func readerModePass(z *reader, arg int) error { + b2 := z.findB(findB2) + if (b2 < z.wi) || (len(z.curr) < b2) { + return errInvalidOffset + } + dst := z.curr[z.wi:b2] + penColor := z.penColor() + for i := range dst { + dst[i] = penColor + } + z.wi = b2 + return nil +} + +func readerModeH(z *reader, arg int) error { + // The first iteration finds a1. The second finds a2. + for i := 0; i < 2; i++ { + if err := z.decodeRun(); err != nil { + return err + } + } + return nil +} + +func readerModeV(z *reader, arg int) error { + a1 := z.findB(findB1) + arg + if (a1 < z.wi) || (len(z.curr) < a1) { + return errInvalidOffset + } + dst := z.curr[z.wi:a1] + penColor := z.penColor() + for i := range dst { + dst[i] = penColor + } + z.wi = a1 + z.penColorIsWhite = !z.penColorIsWhite + return nil +} + +func readerModeExt(z *reader, arg int) error { + return errUnsupportedMode +} + +// DecodeIntoGray decodes the CCITT-formatted data in r into dst. +// +// It returns an error if dst's width and height don't match the implied width +// and height of CCITT-formatted data. 
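A minimal usage sketch of the two entry points implemented below, DecodeIntoGray and NewReader (the width, height and input bytes are illustrative):

package main

import (
	"bytes"
	"image"
	"io"
	"log"

	"golang.org/x/image/ccitt"
)

func decodeFax(data []byte, width, height int) (*image.Gray, error) {
	// Known dimensions: decode straight into an image.Gray.
	dst := image.NewGray(image.Rect(0, 0, width, height))
	err := ccitt.DecodeIntoGray(dst, bytes.NewReader(data), ccitt.MSB, ccitt.Group4, nil)
	return dst, err
}

func main() {
	data := []byte{ /* CCITT Group 4 payload, omitted */ }
	if _, err := decodeFax(data, 1728, 1100); err != nil {
		log.Println("decode:", err)
	}

	// Streaming form: the height may be unknown (AutoDetectHeight). The output
	// is one bit per pixel, MSB first, 1 meaning white, each row byte-aligned.
	r := ccitt.NewReader(bytes.NewReader(data), ccitt.MSB, ccitt.Group4,
		1728, ccitt.AutoDetectHeight, nil)
	if _, err := io.ReadAll(r); err != nil {
		log.Println("read:", err)
	}
}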
+func DecodeIntoGray(dst *image.Gray, r io.Reader, order Order, sf SubFormat, opts *Options) error { + bounds := dst.Bounds() + if (bounds.Dx() < 0) || (bounds.Dy() < 0) { + return errInvalidBounds + } + if bounds.Dx() > maxWidth { + return errUnsupportedWidth + } + + z := reader{ + br: bitReader{r: r, order: order}, + subFormat: sf, + align: (opts != nil) && opts.Align, + invert: (opts != nil) && opts.Invert, + width: bounds.Dx(), + } + if err := z.startDecode(); err != nil { + return err + } + + width := bounds.Dx() + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + p := (y - bounds.Min.Y) * dst.Stride + z.curr = dst.Pix[p : p+width] + if err := z.decodeRow(y+1 == bounds.Max.Y); err != nil { + return err + } + z.curr, z.prev = nil, z.curr + } + + if err := z.finishDecode(false); err != nil { + return err + } + + if z.invert { + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + p := (y - bounds.Min.Y) * dst.Stride + invertBytes(dst.Pix[p : p+width]) + } + } + + return nil +} + +// NewReader returns an io.Reader that decodes the CCITT-formatted data in r. +// The resultant byte stream is one bit per pixel (MSB first), with 1 meaning +// white and 0 meaning black. Each row in the result is byte-aligned. +// +// A negative height, such as passing AutoDetectHeight, means that the image +// height is not known in advance. A negative width is invalid. +func NewReader(r io.Reader, order Order, sf SubFormat, width int, height int, opts *Options) io.Reader { + readErr := error(nil) + if width < 0 { + readErr = errInvalidBounds + } else if width > maxWidth { + readErr = errUnsupportedWidth + } + + return &reader{ + br: bitReader{r: r, order: order}, + subFormat: sf, + align: (opts != nil) && opts.Align, + invert: (opts != nil) && opts.Invert, + width: width, + rowsRemaining: height, + readErr: readErr, + } +} diff --git a/vendor/golang.org/x/image/ccitt/table.go b/vendor/golang.org/x/image/ccitt/table.go new file mode 100644 index 0000000..ef7ea9d --- /dev/null +++ b/vendor/golang.org/x/image/ccitt/table.go @@ -0,0 +1,972 @@ +// generated by "go run gen.go". DO NOT EDIT. + +package ccitt + +// Each decodeTable is represented by an array of [2]int16's: a binary tree. +// Each array element (other than element 0, which means invalid) is a branch +// node in that tree. The root node is always element 1 (the second element). +// +// To walk the tree, look at the next bit in the bit stream, using it to select +// the first or second element of the [2]int16. If that int16 is 0, we have an +// invalid code. If it is positive, go to that branch node. If it is negative, +// then we have a leaf node, whose value is the bitwise complement (the ^ +// operator) of that int16. +// +// Comments above each decodeTable also show the same structure visually. The +// "b123" lines show the 123'rd branch node. The "=XXXXX" lines show an invalid +// code. The "=v1234" lines show a leaf node with value 1234. When reading the +// bit stream, a 0 or 1 bit means to go up or down, as you move left to right. +// +// For example, in modeDecodeTable, branch node b005 is three steps up from the +// root node, meaning that we have already seen "000". If the next bit is "0" +// then we move to branch node b006. Otherwise, the next bit is "1", and we +// move to the leaf node v0000 (also known as the modePass constant). Indeed, +// the bits that encode modePass are "0001". 
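To make the branch/leaf convention concrete, a small self-contained sketch that walks a table in this format, mirroring the decode function in reader.go (the toy table and its values are made up for illustration). Walking modeDecodeTable below with the bits 0, 0, 0, 1 would, by the same procedure, land on the leaf for modePass.

package main

import "fmt"

// toyTable uses the same layout as modeDecodeTable: element 0 is invalid,
// element 1 is the root, positive entries are branch nodes, and negative
// entries are leaves storing the bitwise complement of the decoded value.
// This toy table decodes the bit string "1" to 7 and "01" to 9.
var toyTable = [...][2]int16{
	0: {0, 0},
	1: {2, ^7},
	2: {0, ^9},
}

// walk mirrors reader.go's decode function, but reads bits from a slice.
func walk(table [][2]int16, bits []uint8) (value uint32, ok bool) {
	state := int16(1) // the root node is always element 1
	for _, bit := range bits {
		state = table[state][bit&1]
		if state < 0 {
			return uint32(^state), true // leaf: value is the complement
		}
		if state == 0 {
			return 0, false // invalid code
		}
	}
	return 0, false // ran out of bits: incomplete code
}

func main() {
	fmt.Println(walk(toyTable[:], []uint8{1}))    // 7 true
	fmt.Println(walk(toyTable[:], []uint8{0, 1})) // 9 true
	fmt.Println(walk(toyTable[:], []uint8{0, 0})) // 0 false
}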
+// +// Tables 1, 2 and 3 come from the "ITU-T Recommendation T.6: FACSIMILE CODING +// SCHEMES AND CODING CONTROL FUNCTIONS FOR GROUP 4 FACSIMILE APPARATUS" +// specification: +// +// https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.6-198811-I!!PDF-E&type=items + +// modeDecodeTable represents Table 1 and the End-of-Line code. +// +// +=XXXXX +// b009 +-+ +// | +=v0009 +// b007 +-+ +// | | +=v0008 +// b010 | +-+ +// | +=v0005 +// b006 +-+ +// | | +=v0007 +// b008 | +-+ +// | +=v0004 +// b005 +-+ +// | +=v0000 +// b003 +-+ +// | +=v0001 +// b002 +-+ +// | | +=v0006 +// b004 | +-+ +// | +=v0003 +// b001 +-+ +// +=v0002 +var modeDecodeTable = [...][2]int16{ + 0: {0, 0}, + 1: {2, ^2}, + 2: {3, 4}, + 3: {5, ^1}, + 4: {^6, ^3}, + 5: {6, ^0}, + 6: {7, 8}, + 7: {9, 10}, + 8: {^7, ^4}, + 9: {0, ^9}, + 10: {^8, ^5}, +} + +// whiteDecodeTable represents Tables 2 and 3 for a white run. +// +// +=XXXXX +// b059 +-+ +// | | +=v1792 +// b096 | | +-+ +// | | | | +=v1984 +// b100 | | | +-+ +// | | | +=v2048 +// b094 | | +-+ +// | | | | +=v2112 +// b101 | | | | +-+ +// | | | | | +=v2176 +// b097 | | | +-+ +// | | | | +=v2240 +// b102 | | | +-+ +// | | | +=v2304 +// b085 | +-+ +// | | +=v1856 +// b098 | | +-+ +// | | | +=v1920 +// b095 | +-+ +// | | +=v2368 +// b103 | | +-+ +// | | | +=v2432 +// b099 | +-+ +// | | +=v2496 +// b104 | +-+ +// | +=v2560 +// b040 +-+ +// | | +=v0029 +// b060 | +-+ +// | +=v0030 +// b026 +-+ +// | | +=v0045 +// b061 | | +-+ +// | | | +=v0046 +// b041 | +-+ +// | +=v0022 +// b016 +-+ +// | | +=v0023 +// b042 | | +-+ +// | | | | +=v0047 +// b062 | | | +-+ +// | | | +=v0048 +// b027 | +-+ +// | +=v0013 +// b008 +-+ +// | | +=v0020 +// b043 | | +-+ +// | | | | +=v0033 +// b063 | | | +-+ +// | | | +=v0034 +// b028 | | +-+ +// | | | | +=v0035 +// b064 | | | | +-+ +// | | | | | +=v0036 +// b044 | | | +-+ +// | | | | +=v0037 +// b065 | | | +-+ +// | | | +=v0038 +// b017 | +-+ +// | | +=v0019 +// b045 | | +-+ +// | | | | +=v0031 +// b066 | | | +-+ +// | | | +=v0032 +// b029 | +-+ +// | +=v0001 +// b004 +-+ +// | | +=v0012 +// b030 | | +-+ +// | | | | +=v0053 +// b067 | | | | +-+ +// | | | | | +=v0054 +// b046 | | | +-+ +// | | | +=v0026 +// b018 | | +-+ +// | | | | +=v0039 +// b068 | | | | +-+ +// | | | | | +=v0040 +// b047 | | | | +-+ +// | | | | | | +=v0041 +// b069 | | | | | +-+ +// | | | | | +=v0042 +// b031 | | | +-+ +// | | | | +=v0043 +// b070 | | | | +-+ +// | | | | | +=v0044 +// b048 | | | +-+ +// | | | +=v0021 +// b009 | +-+ +// | | +=v0028 +// b049 | | +-+ +// | | | | +=v0061 +// b071 | | | +-+ +// | | | +=v0062 +// b032 | | +-+ +// | | | | +=v0063 +// b072 | | | | +-+ +// | | | | | +=v0000 +// b050 | | | +-+ +// | | | | +=v0320 +// b073 | | | +-+ +// | | | +=v0384 +// b019 | +-+ +// | +=v0010 +// b002 +-+ +// | | +=v0011 +// b020 | | +-+ +// | | | | +=v0027 +// b051 | | | | +-+ +// | | | | | | +=v0059 +// b074 | | | | | +-+ +// | | | | | +=v0060 +// b033 | | | +-+ +// | | | | +=v1472 +// b086 | | | | +-+ +// | | | | | +=v1536 +// b075 | | | | +-+ +// | | | | | | +=v1600 +// b087 | | | | | +-+ +// | | | | | +=v1728 +// b052 | | | +-+ +// | | | +=v0018 +// b010 | | +-+ +// | | | | +=v0024 +// b053 | | | | +-+ +// | | | | | | +=v0049 +// b076 | | | | | +-+ +// | | | | | +=v0050 +// b034 | | | | +-+ +// | | | | | | +=v0051 +// b077 | | | | | | +-+ +// | | | | | | | +=v0052 +// b054 | | | | | +-+ +// | | | | | +=v0025 +// b021 | | | +-+ +// | | | | +=v0055 +// b078 | | | | +-+ +// | | | | | +=v0056 +// b055 | | | | +-+ +// | | | | | | +=v0057 +// b079 | | | | | +-+ +// | 
| | | | +=v0058 +// b035 | | | +-+ +// | | | +=v0192 +// b005 | +-+ +// | | +=v1664 +// b036 | | +-+ +// | | | | +=v0448 +// b080 | | | | +-+ +// | | | | | +=v0512 +// b056 | | | +-+ +// | | | | +=v0704 +// b088 | | | | +-+ +// | | | | | +=v0768 +// b081 | | | +-+ +// | | | +=v0640 +// b022 | | +-+ +// | | | | +=v0576 +// b082 | | | | +-+ +// | | | | | | +=v0832 +// b089 | | | | | +-+ +// | | | | | +=v0896 +// b057 | | | | +-+ +// | | | | | | +=v0960 +// b090 | | | | | | +-+ +// | | | | | | | +=v1024 +// b083 | | | | | +-+ +// | | | | | | +=v1088 +// b091 | | | | | +-+ +// | | | | | +=v1152 +// b037 | | | +-+ +// | | | | +=v1216 +// b092 | | | | +-+ +// | | | | | +=v1280 +// b084 | | | | +-+ +// | | | | | | +=v1344 +// b093 | | | | | +-+ +// | | | | | +=v1408 +// b058 | | | +-+ +// | | | +=v0256 +// b011 | +-+ +// | +=v0002 +// b001 +-+ +// | +=v0003 +// b012 | +-+ +// | | | +=v0128 +// b023 | | +-+ +// | | +=v0008 +// b006 | +-+ +// | | | +=v0009 +// b024 | | | +-+ +// | | | | | +=v0016 +// b038 | | | | +-+ +// | | | | +=v0017 +// b013 | | +-+ +// | | +=v0004 +// b003 +-+ +// | +=v0005 +// b014 | +-+ +// | | | +=v0014 +// b039 | | | +-+ +// | | | | +=v0015 +// b025 | | +-+ +// | | +=v0064 +// b007 +-+ +// | +=v0006 +// b015 +-+ +// +=v0007 +var whiteDecodeTable = [...][2]int16{ + 0: {0, 0}, + 1: {2, 3}, + 2: {4, 5}, + 3: {6, 7}, + 4: {8, 9}, + 5: {10, 11}, + 6: {12, 13}, + 7: {14, 15}, + 8: {16, 17}, + 9: {18, 19}, + 10: {20, 21}, + 11: {22, ^2}, + 12: {^3, 23}, + 13: {24, ^4}, + 14: {^5, 25}, + 15: {^6, ^7}, + 16: {26, 27}, + 17: {28, 29}, + 18: {30, 31}, + 19: {32, ^10}, + 20: {^11, 33}, + 21: {34, 35}, + 22: {36, 37}, + 23: {^128, ^8}, + 24: {^9, 38}, + 25: {39, ^64}, + 26: {40, 41}, + 27: {42, ^13}, + 28: {43, 44}, + 29: {45, ^1}, + 30: {^12, 46}, + 31: {47, 48}, + 32: {49, 50}, + 33: {51, 52}, + 34: {53, 54}, + 35: {55, ^192}, + 36: {^1664, 56}, + 37: {57, 58}, + 38: {^16, ^17}, + 39: {^14, ^15}, + 40: {59, 60}, + 41: {61, ^22}, + 42: {^23, 62}, + 43: {^20, 63}, + 44: {64, 65}, + 45: {^19, 66}, + 46: {67, ^26}, + 47: {68, 69}, + 48: {70, ^21}, + 49: {^28, 71}, + 50: {72, 73}, + 51: {^27, 74}, + 52: {75, ^18}, + 53: {^24, 76}, + 54: {77, ^25}, + 55: {78, 79}, + 56: {80, 81}, + 57: {82, 83}, + 58: {84, ^256}, + 59: {0, 85}, + 60: {^29, ^30}, + 61: {^45, ^46}, + 62: {^47, ^48}, + 63: {^33, ^34}, + 64: {^35, ^36}, + 65: {^37, ^38}, + 66: {^31, ^32}, + 67: {^53, ^54}, + 68: {^39, ^40}, + 69: {^41, ^42}, + 70: {^43, ^44}, + 71: {^61, ^62}, + 72: {^63, ^0}, + 73: {^320, ^384}, + 74: {^59, ^60}, + 75: {86, 87}, + 76: {^49, ^50}, + 77: {^51, ^52}, + 78: {^55, ^56}, + 79: {^57, ^58}, + 80: {^448, ^512}, + 81: {88, ^640}, + 82: {^576, 89}, + 83: {90, 91}, + 84: {92, 93}, + 85: {94, 95}, + 86: {^1472, ^1536}, + 87: {^1600, ^1728}, + 88: {^704, ^768}, + 89: {^832, ^896}, + 90: {^960, ^1024}, + 91: {^1088, ^1152}, + 92: {^1216, ^1280}, + 93: {^1344, ^1408}, + 94: {96, 97}, + 95: {98, 99}, + 96: {^1792, 100}, + 97: {101, 102}, + 98: {^1856, ^1920}, + 99: {103, 104}, + 100: {^1984, ^2048}, + 101: {^2112, ^2176}, + 102: {^2240, ^2304}, + 103: {^2368, ^2432}, + 104: {^2496, ^2560}, +} + +// blackDecodeTable represents Tables 2 and 3 for a black run. 
+// +// +=XXXXX +// b017 +-+ +// | | +=v1792 +// b042 | | +-+ +// | | | | +=v1984 +// b063 | | | +-+ +// | | | +=v2048 +// b029 | | +-+ +// | | | | +=v2112 +// b064 | | | | +-+ +// | | | | | +=v2176 +// b043 | | | +-+ +// | | | | +=v2240 +// b065 | | | +-+ +// | | | +=v2304 +// b022 | +-+ +// | | +=v1856 +// b044 | | +-+ +// | | | +=v1920 +// b030 | +-+ +// | | +=v2368 +// b066 | | +-+ +// | | | +=v2432 +// b045 | +-+ +// | | +=v2496 +// b067 | +-+ +// | +=v2560 +// b013 +-+ +// | | +=v0018 +// b031 | | +-+ +// | | | | +=v0052 +// b068 | | | | +-+ +// | | | | | | +=v0640 +// b095 | | | | | +-+ +// | | | | | +=v0704 +// b046 | | | +-+ +// | | | | +=v0768 +// b096 | | | | +-+ +// | | | | | +=v0832 +// b069 | | | +-+ +// | | | +=v0055 +// b023 | | +-+ +// | | | | +=v0056 +// b070 | | | | +-+ +// | | | | | | +=v1280 +// b097 | | | | | +-+ +// | | | | | +=v1344 +// b047 | | | | +-+ +// | | | | | | +=v1408 +// b098 | | | | | | +-+ +// | | | | | | | +=v1472 +// b071 | | | | | +-+ +// | | | | | +=v0059 +// b032 | | | +-+ +// | | | | +=v0060 +// b072 | | | | +-+ +// | | | | | | +=v1536 +// b099 | | | | | +-+ +// | | | | | +=v1600 +// b048 | | | +-+ +// | | | +=v0024 +// b018 | +-+ +// | | +=v0025 +// b049 | | +-+ +// | | | | +=v1664 +// b100 | | | | +-+ +// | | | | | +=v1728 +// b073 | | | +-+ +// | | | +=v0320 +// b033 | | +-+ +// | | | | +=v0384 +// b074 | | | | +-+ +// | | | | | +=v0448 +// b050 | | | +-+ +// | | | | +=v0512 +// b101 | | | | +-+ +// | | | | | +=v0576 +// b075 | | | +-+ +// | | | +=v0053 +// b024 | +-+ +// | | +=v0054 +// b076 | | +-+ +// | | | | +=v0896 +// b102 | | | +-+ +// | | | +=v0960 +// b051 | | +-+ +// | | | | +=v1024 +// b103 | | | | +-+ +// | | | | | +=v1088 +// b077 | | | +-+ +// | | | | +=v1152 +// b104 | | | +-+ +// | | | +=v1216 +// b034 | +-+ +// | +=v0064 +// b010 +-+ +// | | +=v0013 +// b019 | | +-+ +// | | | | +=v0023 +// b052 | | | | +-+ +// | | | | | | +=v0050 +// b078 | | | | | +-+ +// | | | | | +=v0051 +// b035 | | | | +-+ +// | | | | | | +=v0044 +// b079 | | | | | | +-+ +// | | | | | | | +=v0045 +// b053 | | | | | +-+ +// | | | | | | +=v0046 +// b080 | | | | | +-+ +// | | | | | +=v0047 +// b025 | | | +-+ +// | | | | +=v0057 +// b081 | | | | +-+ +// | | | | | +=v0058 +// b054 | | | | +-+ +// | | | | | | +=v0061 +// b082 | | | | | +-+ +// | | | | | +=v0256 +// b036 | | | +-+ +// | | | +=v0016 +// b014 | +-+ +// | | +=v0017 +// b037 | | +-+ +// | | | | +=v0048 +// b083 | | | | +-+ +// | | | | | +=v0049 +// b055 | | | +-+ +// | | | | +=v0062 +// b084 | | | +-+ +// | | | +=v0063 +// b026 | | +-+ +// | | | | +=v0030 +// b085 | | | | +-+ +// | | | | | +=v0031 +// b056 | | | | +-+ +// | | | | | | +=v0032 +// b086 | | | | | +-+ +// | | | | | +=v0033 +// b038 | | | +-+ +// | | | | +=v0040 +// b087 | | | | +-+ +// | | | | | +=v0041 +// b057 | | | +-+ +// | | | +=v0022 +// b020 | +-+ +// | +=v0014 +// b008 +-+ +// | | +=v0010 +// b015 | | +-+ +// | | | +=v0011 +// b011 | +-+ +// | | +=v0015 +// b027 | | +-+ +// | | | | +=v0128 +// b088 | | | | +-+ +// | | | | | +=v0192 +// b058 | | | | +-+ +// | | | | | | +=v0026 +// b089 | | | | | +-+ +// | | | | | +=v0027 +// b039 | | | +-+ +// | | | | +=v0028 +// b090 | | | | +-+ +// | | | | | +=v0029 +// b059 | | | +-+ +// | | | +=v0019 +// b021 | | +-+ +// | | | | +=v0020 +// b060 | | | | +-+ +// | | | | | | +=v0034 +// b091 | | | | | +-+ +// | | | | | +=v0035 +// b040 | | | | +-+ +// | | | | | | +=v0036 +// b092 | | | | | | +-+ +// | | | | | | | +=v0037 +// b061 | | | | | +-+ +// | | | | | | +=v0038 +// b093 | | | | | +-+ +// | 
| | | | +=v0039 +// b028 | | | +-+ +// | | | | +=v0021 +// b062 | | | | +-+ +// | | | | | | +=v0042 +// b094 | | | | | +-+ +// | | | | | +=v0043 +// b041 | | | +-+ +// | | | +=v0000 +// b016 | +-+ +// | +=v0012 +// b006 +-+ +// | | +=v0009 +// b012 | | +-+ +// | | | +=v0008 +// b009 | +-+ +// | +=v0007 +// b004 +-+ +// | | +=v0006 +// b007 | +-+ +// | +=v0005 +// b002 +-+ +// | | +=v0001 +// b005 | +-+ +// | +=v0004 +// b001 +-+ +// | +=v0003 +// b003 +-+ +// +=v0002 +var blackDecodeTable = [...][2]int16{ + 0: {0, 0}, + 1: {2, 3}, + 2: {4, 5}, + 3: {^3, ^2}, + 4: {6, 7}, + 5: {^1, ^4}, + 6: {8, 9}, + 7: {^6, ^5}, + 8: {10, 11}, + 9: {12, ^7}, + 10: {13, 14}, + 11: {15, 16}, + 12: {^9, ^8}, + 13: {17, 18}, + 14: {19, 20}, + 15: {^10, ^11}, + 16: {21, ^12}, + 17: {0, 22}, + 18: {23, 24}, + 19: {^13, 25}, + 20: {26, ^14}, + 21: {27, 28}, + 22: {29, 30}, + 23: {31, 32}, + 24: {33, 34}, + 25: {35, 36}, + 26: {37, 38}, + 27: {^15, 39}, + 28: {40, 41}, + 29: {42, 43}, + 30: {44, 45}, + 31: {^18, 46}, + 32: {47, 48}, + 33: {49, 50}, + 34: {51, ^64}, + 35: {52, 53}, + 36: {54, ^16}, + 37: {^17, 55}, + 38: {56, 57}, + 39: {58, 59}, + 40: {60, 61}, + 41: {62, ^0}, + 42: {^1792, 63}, + 43: {64, 65}, + 44: {^1856, ^1920}, + 45: {66, 67}, + 46: {68, 69}, + 47: {70, 71}, + 48: {72, ^24}, + 49: {^25, 73}, + 50: {74, 75}, + 51: {76, 77}, + 52: {^23, 78}, + 53: {79, 80}, + 54: {81, 82}, + 55: {83, 84}, + 56: {85, 86}, + 57: {87, ^22}, + 58: {88, 89}, + 59: {90, ^19}, + 60: {^20, 91}, + 61: {92, 93}, + 62: {^21, 94}, + 63: {^1984, ^2048}, + 64: {^2112, ^2176}, + 65: {^2240, ^2304}, + 66: {^2368, ^2432}, + 67: {^2496, ^2560}, + 68: {^52, 95}, + 69: {96, ^55}, + 70: {^56, 97}, + 71: {98, ^59}, + 72: {^60, 99}, + 73: {100, ^320}, + 74: {^384, ^448}, + 75: {101, ^53}, + 76: {^54, 102}, + 77: {103, 104}, + 78: {^50, ^51}, + 79: {^44, ^45}, + 80: {^46, ^47}, + 81: {^57, ^58}, + 82: {^61, ^256}, + 83: {^48, ^49}, + 84: {^62, ^63}, + 85: {^30, ^31}, + 86: {^32, ^33}, + 87: {^40, ^41}, + 88: {^128, ^192}, + 89: {^26, ^27}, + 90: {^28, ^29}, + 91: {^34, ^35}, + 92: {^36, ^37}, + 93: {^38, ^39}, + 94: {^42, ^43}, + 95: {^640, ^704}, + 96: {^768, ^832}, + 97: {^1280, ^1344}, + 98: {^1408, ^1472}, + 99: {^1536, ^1600}, + 100: {^1664, ^1728}, + 101: {^512, ^576}, + 102: {^896, ^960}, + 103: {^1024, ^1088}, + 104: {^1152, ^1216}, +} + +const maxCodeLength = 13 + +// Each encodeTable is represented by an array of bitStrings. + +// bitString is a pair of uint32 values representing a bit code. +// The nBits low bits of bits make up the actual bit code. +// Eg. bitString{0x0004, 8} represents the bitcode "00000100". +type bitString struct { + bits uint32 + nBits uint32 +} + +// modeEncodeTable represents Table 1 and the End-of-Line code. +var modeEncodeTable = [...]bitString{ + 0: {0x0001, 4}, // "0001" + 1: {0x0001, 3}, // "001" + 2: {0x0001, 1}, // "1" + 3: {0x0003, 3}, // "011" + 4: {0x0003, 6}, // "000011" + 5: {0x0003, 7}, // "0000011" + 6: {0x0002, 3}, // "010" + 7: {0x0002, 6}, // "000010" + 8: {0x0002, 7}, // "0000010" + 9: {0x0001, 7}, // "0000001" +} + +// whiteEncodeTable2 represents Table 2 for a white run. 
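The terminal codes below cover white run lengths 0 through 63; whiteEncodeTable3 further down holds the makeup codes, where index i encodes a run of 64*(i+1) pixels. A hedged sketch of how a run could be split into a makeup plus a terminal code using the bitWriter.writeCode helper from writer.go later in this patch (this encoder helper is illustrative, not part of the package, and assumes 0 <= n <= 2623; longer runs would need the largest makeup code repeated):

func writeWhiteRun(w *bitWriter, n int) error {
	// Runs of 64 or more pixels take a makeup code first; index i of
	// whiteEncodeTable3 holds the code for a run of 64*(i+1) pixels.
	if n >= 64 {
		if err := w.writeCode(whiteEncodeTable3[n/64-1]); err != nil {
			return err
		}
		n %= 64
	}
	// The remainder (0..63) is always emitted as a terminal code.
	return w.writeCode(whiteEncodeTable2[n])
}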
+var whiteEncodeTable2 = [...]bitString{ + 0: {0x0035, 8}, // "00110101" + 1: {0x0007, 6}, // "000111" + 2: {0x0007, 4}, // "0111" + 3: {0x0008, 4}, // "1000" + 4: {0x000b, 4}, // "1011" + 5: {0x000c, 4}, // "1100" + 6: {0x000e, 4}, // "1110" + 7: {0x000f, 4}, // "1111" + 8: {0x0013, 5}, // "10011" + 9: {0x0014, 5}, // "10100" + 10: {0x0007, 5}, // "00111" + 11: {0x0008, 5}, // "01000" + 12: {0x0008, 6}, // "001000" + 13: {0x0003, 6}, // "000011" + 14: {0x0034, 6}, // "110100" + 15: {0x0035, 6}, // "110101" + 16: {0x002a, 6}, // "101010" + 17: {0x002b, 6}, // "101011" + 18: {0x0027, 7}, // "0100111" + 19: {0x000c, 7}, // "0001100" + 20: {0x0008, 7}, // "0001000" + 21: {0x0017, 7}, // "0010111" + 22: {0x0003, 7}, // "0000011" + 23: {0x0004, 7}, // "0000100" + 24: {0x0028, 7}, // "0101000" + 25: {0x002b, 7}, // "0101011" + 26: {0x0013, 7}, // "0010011" + 27: {0x0024, 7}, // "0100100" + 28: {0x0018, 7}, // "0011000" + 29: {0x0002, 8}, // "00000010" + 30: {0x0003, 8}, // "00000011" + 31: {0x001a, 8}, // "00011010" + 32: {0x001b, 8}, // "00011011" + 33: {0x0012, 8}, // "00010010" + 34: {0x0013, 8}, // "00010011" + 35: {0x0014, 8}, // "00010100" + 36: {0x0015, 8}, // "00010101" + 37: {0x0016, 8}, // "00010110" + 38: {0x0017, 8}, // "00010111" + 39: {0x0028, 8}, // "00101000" + 40: {0x0029, 8}, // "00101001" + 41: {0x002a, 8}, // "00101010" + 42: {0x002b, 8}, // "00101011" + 43: {0x002c, 8}, // "00101100" + 44: {0x002d, 8}, // "00101101" + 45: {0x0004, 8}, // "00000100" + 46: {0x0005, 8}, // "00000101" + 47: {0x000a, 8}, // "00001010" + 48: {0x000b, 8}, // "00001011" + 49: {0x0052, 8}, // "01010010" + 50: {0x0053, 8}, // "01010011" + 51: {0x0054, 8}, // "01010100" + 52: {0x0055, 8}, // "01010101" + 53: {0x0024, 8}, // "00100100" + 54: {0x0025, 8}, // "00100101" + 55: {0x0058, 8}, // "01011000" + 56: {0x0059, 8}, // "01011001" + 57: {0x005a, 8}, // "01011010" + 58: {0x005b, 8}, // "01011011" + 59: {0x004a, 8}, // "01001010" + 60: {0x004b, 8}, // "01001011" + 61: {0x0032, 8}, // "00110010" + 62: {0x0033, 8}, // "00110011" + 63: {0x0034, 8}, // "00110100" +} + +// whiteEncodeTable3 represents Table 3 for a white run. 
+var whiteEncodeTable3 = [...]bitString{ + 0: {0x001b, 5}, // "11011" + 1: {0x0012, 5}, // "10010" + 2: {0x0017, 6}, // "010111" + 3: {0x0037, 7}, // "0110111" + 4: {0x0036, 8}, // "00110110" + 5: {0x0037, 8}, // "00110111" + 6: {0x0064, 8}, // "01100100" + 7: {0x0065, 8}, // "01100101" + 8: {0x0068, 8}, // "01101000" + 9: {0x0067, 8}, // "01100111" + 10: {0x00cc, 9}, // "011001100" + 11: {0x00cd, 9}, // "011001101" + 12: {0x00d2, 9}, // "011010010" + 13: {0x00d3, 9}, // "011010011" + 14: {0x00d4, 9}, // "011010100" + 15: {0x00d5, 9}, // "011010101" + 16: {0x00d6, 9}, // "011010110" + 17: {0x00d7, 9}, // "011010111" + 18: {0x00d8, 9}, // "011011000" + 19: {0x00d9, 9}, // "011011001" + 20: {0x00da, 9}, // "011011010" + 21: {0x00db, 9}, // "011011011" + 22: {0x0098, 9}, // "010011000" + 23: {0x0099, 9}, // "010011001" + 24: {0x009a, 9}, // "010011010" + 25: {0x0018, 6}, // "011000" + 26: {0x009b, 9}, // "010011011" + 27: {0x0008, 11}, // "00000001000" + 28: {0x000c, 11}, // "00000001100" + 29: {0x000d, 11}, // "00000001101" + 30: {0x0012, 12}, // "000000010010" + 31: {0x0013, 12}, // "000000010011" + 32: {0x0014, 12}, // "000000010100" + 33: {0x0015, 12}, // "000000010101" + 34: {0x0016, 12}, // "000000010110" + 35: {0x0017, 12}, // "000000010111" + 36: {0x001c, 12}, // "000000011100" + 37: {0x001d, 12}, // "000000011101" + 38: {0x001e, 12}, // "000000011110" + 39: {0x001f, 12}, // "000000011111" +} + +// blackEncodeTable2 represents Table 2 for a black run. +var blackEncodeTable2 = [...]bitString{ + 0: {0x0037, 10}, // "0000110111" + 1: {0x0002, 3}, // "010" + 2: {0x0003, 2}, // "11" + 3: {0x0002, 2}, // "10" + 4: {0x0003, 3}, // "011" + 5: {0x0003, 4}, // "0011" + 6: {0x0002, 4}, // "0010" + 7: {0x0003, 5}, // "00011" + 8: {0x0005, 6}, // "000101" + 9: {0x0004, 6}, // "000100" + 10: {0x0004, 7}, // "0000100" + 11: {0x0005, 7}, // "0000101" + 12: {0x0007, 7}, // "0000111" + 13: {0x0004, 8}, // "00000100" + 14: {0x0007, 8}, // "00000111" + 15: {0x0018, 9}, // "000011000" + 16: {0x0017, 10}, // "0000010111" + 17: {0x0018, 10}, // "0000011000" + 18: {0x0008, 10}, // "0000001000" + 19: {0x0067, 11}, // "00001100111" + 20: {0x0068, 11}, // "00001101000" + 21: {0x006c, 11}, // "00001101100" + 22: {0x0037, 11}, // "00000110111" + 23: {0x0028, 11}, // "00000101000" + 24: {0x0017, 11}, // "00000010111" + 25: {0x0018, 11}, // "00000011000" + 26: {0x00ca, 12}, // "000011001010" + 27: {0x00cb, 12}, // "000011001011" + 28: {0x00cc, 12}, // "000011001100" + 29: {0x00cd, 12}, // "000011001101" + 30: {0x0068, 12}, // "000001101000" + 31: {0x0069, 12}, // "000001101001" + 32: {0x006a, 12}, // "000001101010" + 33: {0x006b, 12}, // "000001101011" + 34: {0x00d2, 12}, // "000011010010" + 35: {0x00d3, 12}, // "000011010011" + 36: {0x00d4, 12}, // "000011010100" + 37: {0x00d5, 12}, // "000011010101" + 38: {0x00d6, 12}, // "000011010110" + 39: {0x00d7, 12}, // "000011010111" + 40: {0x006c, 12}, // "000001101100" + 41: {0x006d, 12}, // "000001101101" + 42: {0x00da, 12}, // "000011011010" + 43: {0x00db, 12}, // "000011011011" + 44: {0x0054, 12}, // "000001010100" + 45: {0x0055, 12}, // "000001010101" + 46: {0x0056, 12}, // "000001010110" + 47: {0x0057, 12}, // "000001010111" + 48: {0x0064, 12}, // "000001100100" + 49: {0x0065, 12}, // "000001100101" + 50: {0x0052, 12}, // "000001010010" + 51: {0x0053, 12}, // "000001010011" + 52: {0x0024, 12}, // "000000100100" + 53: {0x0037, 12}, // "000000110111" + 54: {0x0038, 12}, // "000000111000" + 55: {0x0027, 12}, // "000000100111" + 56: {0x0028, 12}, // "000000101000" + 57: 
{0x0058, 12}, // "000001011000" + 58: {0x0059, 12}, // "000001011001" + 59: {0x002b, 12}, // "000000101011" + 60: {0x002c, 12}, // "000000101100" + 61: {0x005a, 12}, // "000001011010" + 62: {0x0066, 12}, // "000001100110" + 63: {0x0067, 12}, // "000001100111" +} + +// blackEncodeTable3 represents Table 3 for a black run. +var blackEncodeTable3 = [...]bitString{ + 0: {0x000f, 10}, // "0000001111" + 1: {0x00c8, 12}, // "000011001000" + 2: {0x00c9, 12}, // "000011001001" + 3: {0x005b, 12}, // "000001011011" + 4: {0x0033, 12}, // "000000110011" + 5: {0x0034, 12}, // "000000110100" + 6: {0x0035, 12}, // "000000110101" + 7: {0x006c, 13}, // "0000001101100" + 8: {0x006d, 13}, // "0000001101101" + 9: {0x004a, 13}, // "0000001001010" + 10: {0x004b, 13}, // "0000001001011" + 11: {0x004c, 13}, // "0000001001100" + 12: {0x004d, 13}, // "0000001001101" + 13: {0x0072, 13}, // "0000001110010" + 14: {0x0073, 13}, // "0000001110011" + 15: {0x0074, 13}, // "0000001110100" + 16: {0x0075, 13}, // "0000001110101" + 17: {0x0076, 13}, // "0000001110110" + 18: {0x0077, 13}, // "0000001110111" + 19: {0x0052, 13}, // "0000001010010" + 20: {0x0053, 13}, // "0000001010011" + 21: {0x0054, 13}, // "0000001010100" + 22: {0x0055, 13}, // "0000001010101" + 23: {0x005a, 13}, // "0000001011010" + 24: {0x005b, 13}, // "0000001011011" + 25: {0x0064, 13}, // "0000001100100" + 26: {0x0065, 13}, // "0000001100101" + 27: {0x0008, 11}, // "00000001000" + 28: {0x000c, 11}, // "00000001100" + 29: {0x000d, 11}, // "00000001101" + 30: {0x0012, 12}, // "000000010010" + 31: {0x0013, 12}, // "000000010011" + 32: {0x0014, 12}, // "000000010100" + 33: {0x0015, 12}, // "000000010101" + 34: {0x0016, 12}, // "000000010110" + 35: {0x0017, 12}, // "000000010111" + 36: {0x001c, 12}, // "000000011100" + 37: {0x001d, 12}, // "000000011101" + 38: {0x001e, 12}, // "000000011110" + 39: {0x001f, 12}, // "000000011111" +} + +// COPY PASTE table.go BEGIN + +const ( + modePass = iota // Pass + modeH // Horizontal + modeV0 // Vertical-0 + modeVR1 // Vertical-Right-1 + modeVR2 // Vertical-Right-2 + modeVR3 // Vertical-Right-3 + modeVL1 // Vertical-Left-1 + modeVL2 // Vertical-Left-2 + modeVL3 // Vertical-Left-3 + modeExt // Extension +) + +// COPY PASTE table.go END diff --git a/vendor/golang.org/x/image/ccitt/writer.go b/vendor/golang.org/x/image/ccitt/writer.go new file mode 100644 index 0000000..87130ab --- /dev/null +++ b/vendor/golang.org/x/image/ccitt/writer.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ccitt + +import ( + "encoding/binary" + "io" +) + +type bitWriter struct { + w io.Writer + + // order is whether to process w's bytes LSB first or MSB first. + order Order + + // The high nBits bits of the bits field hold encoded bits to be written to w. + bits uint64 + nBits uint32 + + // bytes[:bw] holds encoded bytes not yet written to w. + // Overflow protection is ensured by using a multiple of 8 as bytes length. + bw uint32 + bytes [1024]uint8 +} + +// flushBits copies 64 bits from b.bits to b.bytes. If b.bytes is then full, it +// is written to b.w. 
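For illustration, a minimal standalone sketch (not part of the vendored sources) of how two of the bitString codes above pack MSB-first into bytes, which is the convention the bitWriter below follows when order == MSB; the packMSB helper is hypothetical and only meant for short code sequences.

package main

import "fmt"

type bitString struct {
	bits  uint32
	nBits uint32
}

// packMSB appends each code high bit first and zero-pads the final byte.
// Illustrative only: the 32-bit accumulator overflows for long sequences.
func packMSB(codes []bitString) []byte {
	var out []byte
	var acc uint32 // bit accumulator
	var n uint32   // number of valid low bits in acc
	for _, c := range codes {
		acc = acc<<c.nBits | c.bits
		n += c.nBits
		for n >= 8 {
			out = append(out, byte(acc>>(n-8)))
			n -= 8
		}
	}
	if n > 0 {
		out = append(out, byte(acc<<(8-n)))
	}
	return out
}

func main() {
	// whiteEncodeTable3[0] ("11011") followed by whiteEncodeTable3[1] ("10010")
	// packs as 11011100 10000000, i.e. 0xdc 0x80.
	fmt.Printf("% x\n", packMSB([]bitString{{0x001b, 5}, {0x0012, 5}}))
}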
+func (b *bitWriter) flushBits() error { + binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits) + b.bits = 0 + b.nBits = 0 + b.bw += 8 + if b.bw < uint32(len(b.bytes)) { + return nil + } + b.bw = 0 + if b.order != MSB { + reverseBitsWithinBytes(b.bytes[:]) + } + _, err := b.w.Write(b.bytes[:]) + return err +} + +// close finalizes a bitcode stream by writing any +// pending bits to bitWriter's underlying io.Writer. +func (b *bitWriter) close() error { + // Write any encoded bits to bytes. + if b.nBits > 0 { + binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits) + b.bw += (b.nBits + 7) >> 3 + } + + if b.order != MSB { + reverseBitsWithinBytes(b.bytes[:b.bw]) + } + + // Write b.bw bytes to b.w. + _, err := b.w.Write(b.bytes[:b.bw]) + return err +} + +// alignToByteBoundary rounds b.nBits up to a multiple of 8. +// If all 64 bits are used, flush them to bitWriter's bytes. +func (b *bitWriter) alignToByteBoundary() error { + if b.nBits = (b.nBits + 7) &^ 7; b.nBits == 64 { + return b.flushBits() + } + return nil +} + +// writeCode writes a variable length bitcode to b's underlying io.Writer. +func (b *bitWriter) writeCode(bs bitString) error { + bits := bs.bits + nBits := bs.nBits + if 64-b.nBits >= nBits { + // b.bits has sufficient room for storing nBits bits. + b.bits |= uint64(bits) << (64 - nBits - b.nBits) + b.nBits += nBits + if b.nBits == 64 { + return b.flushBits() + } + return nil + } + + // Number of leading bits that fill b.bits. + i := 64 - b.nBits + + // Fill b.bits then flush and write remaining bits. + b.bits |= uint64(bits) >> (nBits - i) + b.nBits = 64 + + if err := b.flushBits(); err != nil { + return err + } + + nBits -= i + b.bits = uint64(bits) << (64 - nBits) + b.nBits = nBits + return nil +} diff --git a/vendor/golang.org/x/image/tiff/buffer.go b/vendor/golang.org/x/image/tiff/buffer.go new file mode 100644 index 0000000..d1801be --- /dev/null +++ b/vendor/golang.org/x/image/tiff/buffer.go @@ -0,0 +1,69 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +import "io" + +// buffer buffers an io.Reader to satisfy io.ReaderAt. +type buffer struct { + r io.Reader + buf []byte +} + +// fill reads data from b.r until the buffer contains at least end bytes. +func (b *buffer) fill(end int) error { + m := len(b.buf) + if end > m { + if end > cap(b.buf) { + newcap := 1024 + for newcap < end { + newcap *= 2 + } + newbuf := make([]byte, end, newcap) + copy(newbuf, b.buf) + b.buf = newbuf + } else { + b.buf = b.buf[:end] + } + if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil { + end = m + n + b.buf = b.buf[:end] + return err + } + } + return nil +} + +func (b *buffer) ReadAt(p []byte, off int64) (int, error) { + o := int(off) + end := o + len(p) + if int64(end) != off+int64(len(p)) { + return 0, io.ErrUnexpectedEOF + } + + err := b.fill(end) + return copy(p, b.buf[o:end]), err +} + +// Slice returns a slice of the underlying buffer. The slice contains +// n bytes starting at offset off. +func (b *buffer) Slice(off, n int) ([]byte, error) { + end := off + n + if err := b.fill(end); err != nil { + return nil, err + } + return b.buf[off:end], nil +} + +// newReaderAt converts an io.Reader into an io.ReaderAt. 
+func newReaderAt(r io.Reader) io.ReaderAt { + if ra, ok := r.(io.ReaderAt); ok { + return ra + } + return &buffer{ + r: r, + buf: make([]byte, 0, 1024), + } +} diff --git a/vendor/golang.org/x/image/tiff/compress.go b/vendor/golang.org/x/image/tiff/compress.go new file mode 100644 index 0000000..3f176f0 --- /dev/null +++ b/vendor/golang.org/x/image/tiff/compress.go @@ -0,0 +1,58 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +import ( + "bufio" + "io" +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +// unpackBits decodes the PackBits-compressed data in src and returns the +// uncompressed data. +// +// The PackBits compression format is described in section 9 (p. 42) +// of the TIFF spec. +func unpackBits(r io.Reader) ([]byte, error) { + buf := make([]byte, 128) + dst := make([]byte, 0, 1024) + br, ok := r.(byteReader) + if !ok { + br = bufio.NewReader(r) + } + + for { + b, err := br.ReadByte() + if err != nil { + if err == io.EOF { + return dst, nil + } + return nil, err + } + code := int(int8(b)) + switch { + case code >= 0: + n, err := io.ReadFull(br, buf[:code+1]) + if err != nil { + return nil, err + } + dst = append(dst, buf[:n]...) + case code == -128: + // No-op. + default: + if b, err = br.ReadByte(); err != nil { + return nil, err + } + for j := 0; j < 1-code; j++ { + buf[j] = b + } + dst = append(dst, buf[:1-code]...) + } + } +} diff --git a/vendor/golang.org/x/image/tiff/consts.go b/vendor/golang.org/x/image/tiff/consts.go new file mode 100644 index 0000000..3e5f7f1 --- /dev/null +++ b/vendor/golang.org/x/image/tiff/consts.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +// A tiff image file contains one or more images. The metadata +// of each image is contained in an Image File Directory (IFD), +// which contains entries of 12 bytes each and is described +// on page 14-16 of the specification. An IFD entry consists of +// +// - a tag, which describes the signification of the entry, +// - the data type and length of the entry, +// - the data itself or a pointer to it if it is more than 4 bytes. +// +// The presence of a length means that each IFD is effectively an array. + +const ( + leHeader = "II\x2A\x00" // Header for little-endian files. + beHeader = "MM\x00\x2A" // Header for big-endian files. + + ifdLen = 12 // Length of an IFD entry in bytes. +) + +// Data types (p. 14-16 of the spec). +const ( + dtByte = 1 + dtASCII = 2 + dtShort = 3 + dtLong = 4 + dtRational = 5 +) + +// The length of one instance of each data type in bytes. +var lengths = [...]uint32{0, 1, 1, 2, 4, 8} + +// Tags (see p. 28-41 of the spec). +const ( + tImageWidth = 256 + tImageLength = 257 + tBitsPerSample = 258 + tCompression = 259 + tPhotometricInterpretation = 262 + + tFillOrder = 266 + + tStripOffsets = 273 + tSamplesPerPixel = 277 + tRowsPerStrip = 278 + tStripByteCounts = 279 + + tT4Options = 292 // CCITT Group 3 options, a set of 32 flag bits. + tT6Options = 293 // CCITT Group 4 options, a set of 32 flag bits. 
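As a worked example of the PackBits rules that unpackBits above implements (written as a hypothetical in-package test, since unpackBits is unexported): 0xFE is the signed value -2, so the following byte repeats 1-(-2) = 3 times, and 0x02 copies the next 2+1 literal bytes.

package tiff

import (
	"bytes"
	"testing"
)

func TestUnpackBitsExample(t *testing.T) {
	src := []byte{0xFE, 0xAA, 0x02, 0x80, 0x00, 0x2A}
	got, err := unpackBits(bytes.NewReader(src))
	if err != nil {
		t.Fatal(err)
	}
	want := []byte{0xAA, 0xAA, 0xAA, 0x80, 0x00, 0x2A}
	if !bytes.Equal(got, want) {
		t.Fatalf("unpackBits = % x, want % x", got, want)
	}
}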
+ + tTileWidth = 322 + tTileLength = 323 + tTileOffsets = 324 + tTileByteCounts = 325 + + tXResolution = 282 + tYResolution = 283 + tResolutionUnit = 296 + + tPredictor = 317 + tColorMap = 320 + tExtraSamples = 338 + tSampleFormat = 339 +) + +// Compression types (defined in various places in the spec and supplements). +const ( + cNone = 1 + cCCITT = 2 + cG3 = 3 // Group 3 Fax. + cG4 = 4 // Group 4 Fax. + cLZW = 5 + cJPEGOld = 6 // Superseded by cJPEG. + cJPEG = 7 + cDeflate = 8 // zlib compression. + cPackBits = 32773 + cDeflateOld = 32946 // Superseded by cDeflate. +) + +// Photometric interpretation values (see p. 37 of the spec). +const ( + pWhiteIsZero = 0 + pBlackIsZero = 1 + pRGB = 2 + pPaletted = 3 + pTransMask = 4 // transparency mask + pCMYK = 5 + pYCbCr = 6 + pCIELab = 8 +) + +// Values for the tPredictor tag (page 64-65 of the spec). +const ( + prNone = 1 + prHorizontal = 2 +) + +// Values for the tResolutionUnit tag (page 18). +const ( + resNone = 1 + resPerInch = 2 // Dots per inch. + resPerCM = 3 // Dots per centimeter. +) + +// imageMode represents the mode of the image. +type imageMode int + +const ( + mBilevel imageMode = iota + mPaletted + mGray + mGrayInvert + mRGB + mRGBA + mNRGBA + mCMYK +) + +// CompressionType describes the type of compression used in Options. +type CompressionType int + +// Constants for supported compression types. +const ( + Uncompressed CompressionType = iota + Deflate + LZW + CCITTGroup3 + CCITTGroup4 +) + +// specValue returns the compression type constant from the TIFF spec that +// is equivalent to c. +func (c CompressionType) specValue() uint32 { + switch c { + case LZW: + return cLZW + case Deflate: + return cDeflate + case CCITTGroup3: + return cG3 + case CCITTGroup4: + return cG4 + } + return cNone +} diff --git a/vendor/golang.org/x/image/tiff/fuzz.go b/vendor/golang.org/x/image/tiff/fuzz.go new file mode 100644 index 0000000..ec52c78 --- /dev/null +++ b/vendor/golang.org/x/image/tiff/fuzz.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gofuzz + +package tiff + +import "bytes" + +func Fuzz(data []byte) int { + cfg, err := DecodeConfig(bytes.NewReader(data)) + if err != nil { + return 0 + } + if cfg.Width*cfg.Height > 1e6 { + return 0 + } + img, err := Decode(bytes.NewReader(data)) + if err != nil { + return 0 + } + var w bytes.Buffer + err = Encode(&w, img, nil) + if err != nil { + panic(err) + } + return 1 +} diff --git a/vendor/golang.org/x/image/tiff/lzw/reader.go b/vendor/golang.org/x/image/tiff/lzw/reader.go new file mode 100644 index 0000000..78204ba --- /dev/null +++ b/vendor/golang.org/x/image/tiff/lzw/reader.go @@ -0,0 +1,272 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzw implements the Lempel-Ziv-Welch compressed data format, +// described in T. A. Welch, ``A Technique for High-Performance Data +// Compression'', Computer, 17(6) (June 1984), pp 8-19. +// +// In particular, it implements LZW as used by the TIFF file format, including +// an "off by one" algorithmic difference when compared to standard LZW. +package lzw // import "golang.org/x/image/tiff/lzw" + +/* +This file was branched from src/pkg/compress/lzw/reader.go in the +standard library. Differences from the original are marked with "NOTE". 
+ +The tif_lzw.c file in the libtiff C library has this comment: + +---- +The 5.0 spec describes a different algorithm than Aldus +implements. Specifically, Aldus does code length transitions +one code earlier than should be done (for real LZW). +Earlier versions of this library implemented the correct +LZW algorithm, but emitted codes in a bit order opposite +to the TIFF spec. Thus, to maintain compatibility w/ Aldus +we interpret MSB-LSB ordered codes to be images written w/ +old versions of this library, but otherwise adhere to the +Aldus "off by one" algorithm. +---- + +The Go code doesn't read (invalid) TIFF files written by old versions of +libtiff, but the LZW algorithm in this package still differs from the one in +Go's standard package library to accomodate this "off by one" in valid TIFFs. +*/ + +import ( + "bufio" + "errors" + "fmt" + "io" +) + +// Order specifies the bit ordering in an LZW data stream. +type Order int + +const ( + // LSB means Least Significant Bits first, as used in the GIF file format. + LSB Order = iota + // MSB means Most Significant Bits first, as used in the TIFF and PDF + // file formats. + MSB +) + +const ( + maxWidth = 12 + decoderInvalidCode = 0xffff + flushBuffer = 1 << maxWidth +) + +// decoder is the state from which the readXxx method converts a byte +// stream into a code stream. +type decoder struct { + r io.ByteReader + bits uint32 + nBits uint + width uint + read func(*decoder) (uint16, error) // readLSB or readMSB + litWidth int // width in bits of literal codes + err error + + // The first 1<= 1<>= d.width + d.nBits -= d.width + return code, nil +} + +// readMSB returns the next code for "Most Significant Bits first" data. +func (d *decoder) readMSB() (uint16, error) { + for d.nBits < d.width { + x, err := d.r.ReadByte() + if err != nil { + return 0, err + } + d.bits |= uint32(x) << (24 - d.nBits) + d.nBits += 8 + } + code := uint16(d.bits >> (32 - d.width)) + d.bits <<= d.width + d.nBits -= d.width + return code, nil +} + +func (d *decoder) Read(b []byte) (int, error) { + for { + if len(d.toRead) > 0 { + n := copy(b, d.toRead) + d.toRead = d.toRead[n:] + return n, nil + } + if d.err != nil { + return 0, d.err + } + d.decode() + } +} + +// decode decompresses bytes from r and leaves them in d.toRead. +// read specifies how to decode bytes into codes. +// litWidth is the width in bits of literal codes. +func (d *decoder) decode() { + // Loop over the code stream, converting codes into decompressed bytes. +loop: + for { + code, err := d.read(d) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + d.err = err + break + } + switch { + case code < d.clear: + // We have a literal code. + d.output[d.o] = uint8(code) + d.o++ + if d.last != decoderInvalidCode { + // Save what the hi code expands to. + d.suffix[d.hi] = uint8(code) + d.prefix[d.hi] = d.last + } + case code == d.clear: + d.width = 1 + uint(d.litWidth) + d.hi = d.eof + d.overflow = 1 << d.width + d.last = decoderInvalidCode + continue + case code == d.eof: + d.err = io.EOF + break loop + case code <= d.hi: + c, i := code, len(d.output)-1 + if code == d.hi && d.last != decoderInvalidCode { + // code == hi is a special case which expands to the last expansion + // followed by the head of the last expansion. To find the head, we walk + // the prefix chain until we find a literal code. + c = d.last + for c >= d.clear { + c = d.prefix[c] + } + d.output[i] = uint8(c) + i-- + c = d.last + } + // Copy the suffix chain into output and then write that to w. 
+ for c >= d.clear { + d.output[i] = d.suffix[c] + i-- + c = d.prefix[c] + } + d.output[i] = uint8(c) + d.o += copy(d.output[d.o:], d.output[i:]) + if d.last != decoderInvalidCode { + // Save what the hi code expands to. + d.suffix[d.hi] = uint8(c) + d.prefix[d.hi] = d.last + } + default: + d.err = errors.New("lzw: invalid code") + break loop + } + d.last, d.hi = code, d.hi+1 + if d.hi+1 >= d.overflow { // NOTE: the "+1" is where TIFF's LZW differs from the standard algorithm. + if d.width == maxWidth { + d.last = decoderInvalidCode + } else { + d.width++ + d.overflow <<= 1 + } + } + if d.o >= flushBuffer { + break + } + } + // Flush pending output. + d.toRead = d.output[:d.o] + d.o = 0 +} + +var errClosed = errors.New("lzw: reader/writer is closed") + +func (d *decoder) Close() error { + d.err = errClosed // in case any Reads come along + return nil +} + +// NewReader creates a new io.ReadCloser. +// Reads from the returned io.ReadCloser read and decompress data from r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser when +// finished reading. +// The number of bits to use for literal codes, litWidth, must be in the +// range [2,8] and is typically 8. It must equal the litWidth +// used during compression. +func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser { + d := new(decoder) + switch order { + case LSB: + d.read = (*decoder).readLSB + case MSB: + d.read = (*decoder).readMSB + default: + d.err = errors.New("lzw: unknown order") + return d + } + if litWidth < 2 || 8 < litWidth { + d.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth) + return d + } + if br, ok := r.(io.ByteReader); ok { + d.r = br + } else { + d.r = bufio.NewReader(r) + } + d.litWidth = litWidth + d.width = 1 + uint(litWidth) + d.clear = uint16(1) << uint(litWidth) + d.eof, d.hi = d.clear+1, d.clear+1 + d.overflow = uint16(1) << d.width + d.last = decoderInvalidCode + + return d +} diff --git a/vendor/golang.org/x/image/tiff/reader.go b/vendor/golang.org/x/image/tiff/reader.go new file mode 100644 index 0000000..de73f4b --- /dev/null +++ b/vendor/golang.org/x/image/tiff/reader.go @@ -0,0 +1,709 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tiff implements a TIFF image decoder and encoder. +// +// The TIFF specification is at http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf +package tiff // import "golang.org/x/image/tiff" + +import ( + "compress/zlib" + "encoding/binary" + "fmt" + "image" + "image/color" + "io" + "io/ioutil" + "math" + + "golang.org/x/image/ccitt" + "golang.org/x/image/tiff/lzw" +) + +// A FormatError reports that the input is not a valid TIFF image. +type FormatError string + +func (e FormatError) Error() string { + return "tiff: invalid format: " + string(e) +} + +// An UnsupportedError reports that the input uses a valid but +// unimplemented feature. +type UnsupportedError string + +func (e UnsupportedError) Error() string { + return "tiff: unsupported feature: " + string(e) +} + +var errNoPixels = FormatError("not enough pixel data") + +type decoder struct { + r io.ReaderAt + byteOrder binary.ByteOrder + config image.Config + mode imageMode + bpp uint + features map[int][]uint + palette []color.Color + + buf []byte + off int // Current offset in buf. 
+ v uint32 // Buffer value for reading with arbitrary bit depths. + nbits uint // Remaining number of bits in v. +} + +// firstVal returns the first uint of the features entry with the given tag, +// or 0 if the tag does not exist. +func (d *decoder) firstVal(tag int) uint { + f := d.features[tag] + if len(f) == 0 { + return 0 + } + return f[0] +} + +// ifdUint decodes the IFD entry in p, which must be of the Byte, Short +// or Long type, and returns the decoded uint values. +func (d *decoder) ifdUint(p []byte) (u []uint, err error) { + var raw []byte + if len(p) < ifdLen { + return nil, FormatError("bad IFD entry") + } + + datatype := d.byteOrder.Uint16(p[2:4]) + if dt := int(datatype); dt <= 0 || dt >= len(lengths) { + return nil, UnsupportedError("IFD entry datatype") + } + + count := d.byteOrder.Uint32(p[4:8]) + if count > math.MaxInt32/lengths[datatype] { + return nil, FormatError("IFD data too large") + } + if datalen := lengths[datatype] * count; datalen > 4 { + // The IFD contains a pointer to the real value. + raw = make([]byte, datalen) + _, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12]))) + } else { + raw = p[8 : 8+datalen] + } + if err != nil { + return nil, err + } + + u = make([]uint, count) + switch datatype { + case dtByte: + for i := uint32(0); i < count; i++ { + u[i] = uint(raw[i]) + } + case dtShort: + for i := uint32(0); i < count; i++ { + u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)])) + } + case dtLong: + for i := uint32(0); i < count; i++ { + u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)])) + } + default: + return nil, UnsupportedError("data type") + } + return u, nil +} + +// parseIFD decides whether the IFD entry in p is "interesting" and +// stows away the data in the decoder. It returns the tag number of the +// entry and an error, if any. +func (d *decoder) parseIFD(p []byte) (int, error) { + tag := d.byteOrder.Uint16(p[0:2]) + switch tag { + case tBitsPerSample, + tExtraSamples, + tPhotometricInterpretation, + tCompression, + tPredictor, + tStripOffsets, + tStripByteCounts, + tRowsPerStrip, + tTileWidth, + tTileLength, + tTileOffsets, + tTileByteCounts, + tImageLength, + tImageWidth, + tFillOrder, + tT4Options, + tT6Options: + val, err := d.ifdUint(p) + if err != nil { + return 0, err + } + d.features[int(tag)] = val + case tColorMap: + val, err := d.ifdUint(p) + if err != nil { + return 0, err + } + numcolors := len(val) / 3 + if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 { + return 0, FormatError("bad ColorMap length") + } + d.palette = make([]color.Color, numcolors) + for i := 0; i < numcolors; i++ { + d.palette[i] = color.RGBA64{ + uint16(val[i]), + uint16(val[i+numcolors]), + uint16(val[i+2*numcolors]), + 0xffff, + } + } + case tSampleFormat: + // Page 27 of the spec: If the SampleFormat is present and + // the value is not 1 [= unsigned integer data], a Baseline + // TIFF reader that cannot handle the SampleFormat value + // must terminate the import process gracefully. + val, err := d.ifdUint(p) + if err != nil { + return 0, err + } + for _, v := range val { + if v != 1 { + return 0, UnsupportedError("sample format") + } + } + } + return int(tag), nil +} + +// readBits reads n bits from the internal buffer starting at the current offset. 
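For reference, a standalone sketch (placeholder values, not part of the vendored file) of the 12-byte little-endian IFD entry that parseIFD and ifdUint above would see for an ImageWidth of 800; because the data occupies no more than 4 bytes, it is stored inline rather than behind an offset.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	entry := []byte{
		0x00, 0x01, // tag   = 256 (tImageWidth)
		0x03, 0x00, // type  = 3 (dtShort), so each value is 2 bytes
		0x01, 0x00, 0x00, 0x00, // count = 1, hence datalen = 2 <= 4
		0x20, 0x03, 0x00, 0x00, // value = 800, padded out to 4 bytes
	}
	le := binary.LittleEndian
	fmt.Println("tag:  ", le.Uint16(entry[0:2]))  // 256
	fmt.Println("type: ", le.Uint16(entry[2:4]))  // 3
	fmt.Println("count:", le.Uint32(entry[4:8]))  // 1
	fmt.Println("value:", le.Uint16(entry[8:10])) // 800
}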
+func (d *decoder) readBits(n uint) (v uint32, ok bool) { + for d.nbits < n { + d.v <<= 8 + if d.off >= len(d.buf) { + return 0, false + } + d.v |= uint32(d.buf[d.off]) + d.off++ + d.nbits += 8 + } + d.nbits -= n + rv := d.v >> d.nbits + d.v &^= rv << d.nbits + return rv, true +} + +// flushBits discards the unread bits in the buffer used by readBits. +// It is used at the end of a line. +func (d *decoder) flushBits() { + d.v = 0 + d.nbits = 0 +} + +// minInt returns the smaller of x or y. +func minInt(a, b int) int { + if a <= b { + return a + } + return b +} + +// decode decodes the raw data of an image. +// It reads from d.buf and writes the strip or tile into dst. +func (d *decoder) decode(dst image.Image, xmin, ymin, xmax, ymax int) error { + d.off = 0 + + // Apply horizontal predictor if necessary. + // In this case, p contains the color difference to the preceding pixel. + // See page 64-65 of the spec. + if d.firstVal(tPredictor) == prHorizontal { + switch d.bpp { + case 16: + var off int + n := 2 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel + for y := ymin; y < ymax; y++ { + off += n + for x := 0; x < (xmax-xmin-1)*n; x += 2 { + if off+2 > len(d.buf) { + return errNoPixels + } + v0 := d.byteOrder.Uint16(d.buf[off-n : off-n+2]) + v1 := d.byteOrder.Uint16(d.buf[off : off+2]) + d.byteOrder.PutUint16(d.buf[off:off+2], v1+v0) + off += 2 + } + } + case 8: + var off int + n := 1 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel + for y := ymin; y < ymax; y++ { + off += n + for x := 0; x < (xmax-xmin-1)*n; x++ { + if off >= len(d.buf) { + return errNoPixels + } + d.buf[off] += d.buf[off-n] + off++ + } + } + case 1: + return UnsupportedError("horizontal predictor with 1 BitsPerSample") + } + } + + rMaxX := minInt(xmax, dst.Bounds().Max.X) + rMaxY := minInt(ymax, dst.Bounds().Max.Y) + switch d.mode { + case mGray, mGrayInvert: + if d.bpp == 16 { + img := dst.(*image.Gray16) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+2 > len(d.buf) { + return errNoPixels + } + v := d.byteOrder.Uint16(d.buf[d.off : d.off+2]) + d.off += 2 + if d.mode == mGrayInvert { + v = 0xffff - v + } + img.SetGray16(x, y, color.Gray16{v}) + } + if rMaxX == img.Bounds().Max.X { + d.off += 2 * (xmax - img.Bounds().Max.X) + } + } + } else { + img := dst.(*image.Gray) + max := uint32((1 << d.bpp) - 1) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + v, ok := d.readBits(d.bpp) + if !ok { + return errNoPixels + } + v = v * 0xff / max + if d.mode == mGrayInvert { + v = 0xff - v + } + img.SetGray(x, y, color.Gray{uint8(v)}) + } + d.flushBits() + } + } + case mPaletted: + img := dst.(*image.Paletted) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + v, ok := d.readBits(d.bpp) + if !ok { + return errNoPixels + } + img.SetColorIndex(x, y, uint8(v)) + } + d.flushBits() + } + case mRGB: + if d.bpp == 16 { + img := dst.(*image.RGBA64) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+6 > len(d.buf) { + return errNoPixels + } + r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2]) + g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4]) + b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6]) + d.off += 6 + img.SetRGBA64(x, y, color.RGBA64{r, g, b, 0xffff}) + } + } + } else { + img := dst.(*image.RGBA) + for y := ymin; y < rMaxY; y++ { + min := img.PixOffset(xmin, y) + max := img.PixOffset(rMaxX, y) + off := (y - ymin) * (xmax - xmin) * 3 + for i := min; i < max; i += 4 { + if off+3 > 
len(d.buf) { + return errNoPixels + } + img.Pix[i+0] = d.buf[off+0] + img.Pix[i+1] = d.buf[off+1] + img.Pix[i+2] = d.buf[off+2] + img.Pix[i+3] = 0xff + off += 3 + } + } + } + case mNRGBA: + if d.bpp == 16 { + img := dst.(*image.NRGBA64) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+8 > len(d.buf) { + return errNoPixels + } + r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2]) + g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4]) + b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6]) + a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8]) + d.off += 8 + img.SetNRGBA64(x, y, color.NRGBA64{r, g, b, a}) + } + } + } else { + img := dst.(*image.NRGBA) + for y := ymin; y < rMaxY; y++ { + min := img.PixOffset(xmin, y) + max := img.PixOffset(rMaxX, y) + i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4 + if i1 > len(d.buf) { + return errNoPixels + } + copy(img.Pix[min:max], d.buf[i0:i1]) + } + } + case mRGBA: + if d.bpp == 16 { + img := dst.(*image.RGBA64) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+8 > len(d.buf) { + return errNoPixels + } + r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2]) + g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4]) + b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6]) + a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8]) + d.off += 8 + img.SetRGBA64(x, y, color.RGBA64{r, g, b, a}) + } + } + } else { + img := dst.(*image.RGBA) + for y := ymin; y < rMaxY; y++ { + min := img.PixOffset(xmin, y) + max := img.PixOffset(rMaxX, y) + i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4 + if i1 > len(d.buf) { + return errNoPixels + } + copy(img.Pix[min:max], d.buf[i0:i1]) + } + } + } + + return nil +} + +func newDecoder(r io.Reader) (*decoder, error) { + d := &decoder{ + r: newReaderAt(r), + features: make(map[int][]uint), + } + + p := make([]byte, 8) + if _, err := d.r.ReadAt(p, 0); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + switch string(p[0:4]) { + case leHeader: + d.byteOrder = binary.LittleEndian + case beHeader: + d.byteOrder = binary.BigEndian + default: + return nil, FormatError("malformed header") + } + + ifdOffset := int64(d.byteOrder.Uint32(p[4:8])) + + // The first two bytes contain the number of entries (12 bytes each). + if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil { + return nil, err + } + numItems := int(d.byteOrder.Uint16(p[0:2])) + + // All IFD entries are read in one chunk. + p = make([]byte, ifdLen*numItems) + if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil { + return nil, err + } + + prevTag := -1 + for i := 0; i < len(p); i += ifdLen { + tag, err := d.parseIFD(p[i : i+ifdLen]) + if err != nil { + return nil, err + } + if tag <= prevTag { + return nil, FormatError("tags are not sorted in ascending order") + } + prevTag = tag + } + + d.config.Width = int(d.firstVal(tImageWidth)) + d.config.Height = int(d.firstVal(tImageLength)) + + if _, ok := d.features[tBitsPerSample]; !ok { + // Default is 1 per specification. + d.features[tBitsPerSample] = []uint{1} + } + d.bpp = d.firstVal(tBitsPerSample) + switch d.bpp { + case 0: + return nil, FormatError("BitsPerSample must not be 0") + case 1, 8, 16: + // Nothing to do, these are accepted by this implementation. + default: + return nil, UnsupportedError(fmt.Sprintf("BitsPerSample of %v", d.bpp)) + } + + // Determine the image mode. 
+ switch d.firstVal(tPhotometricInterpretation) { + case pRGB: + if d.bpp == 16 { + for _, b := range d.features[tBitsPerSample] { + if b != 16 { + return nil, FormatError("wrong number of samples for 16bit RGB") + } + } + } else { + for _, b := range d.features[tBitsPerSample] { + if b != 8 { + return nil, FormatError("wrong number of samples for 8bit RGB") + } + } + } + // RGB images normally have 3 samples per pixel. + // If there are more, ExtraSamples (p. 31-32 of the spec) + // gives their meaning (usually an alpha channel). + // + // This implementation does not support extra samples + // of an unspecified type. + switch len(d.features[tBitsPerSample]) { + case 3: + d.mode = mRGB + if d.bpp == 16 { + d.config.ColorModel = color.RGBA64Model + } else { + d.config.ColorModel = color.RGBAModel + } + case 4: + switch d.firstVal(tExtraSamples) { + case 1: + d.mode = mRGBA + if d.bpp == 16 { + d.config.ColorModel = color.RGBA64Model + } else { + d.config.ColorModel = color.RGBAModel + } + case 2: + d.mode = mNRGBA + if d.bpp == 16 { + d.config.ColorModel = color.NRGBA64Model + } else { + d.config.ColorModel = color.NRGBAModel + } + default: + return nil, FormatError("wrong number of samples for RGB") + } + default: + return nil, FormatError("wrong number of samples for RGB") + } + case pPaletted: + d.mode = mPaletted + d.config.ColorModel = color.Palette(d.palette) + case pWhiteIsZero: + d.mode = mGrayInvert + if d.bpp == 16 { + d.config.ColorModel = color.Gray16Model + } else { + d.config.ColorModel = color.GrayModel + } + case pBlackIsZero: + d.mode = mGray + if d.bpp == 16 { + d.config.ColorModel = color.Gray16Model + } else { + d.config.ColorModel = color.GrayModel + } + default: + return nil, UnsupportedError("color model") + } + + return d, nil +} + +// DecodeConfig returns the color model and dimensions of a TIFF image without +// decoding the entire image. +func DecodeConfig(r io.Reader) (image.Config, error) { + d, err := newDecoder(r) + if err != nil { + return image.Config{}, err + } + return d.config, nil +} + +func ccittFillOrder(tiffFillOrder uint) ccitt.Order { + if tiffFillOrder == 2 { + return ccitt.LSB + } + return ccitt.MSB +} + +// Decode reads a TIFF image from r and returns it as an image.Image. +// The type of Image returned depends on the contents of the TIFF. +func Decode(r io.Reader) (img image.Image, err error) { + d, err := newDecoder(r) + if err != nil { + return + } + + blockPadding := false + blockWidth := d.config.Width + blockHeight := d.config.Height + blocksAcross := 1 + blocksDown := 1 + + if d.config.Width == 0 { + blocksAcross = 0 + } + if d.config.Height == 0 { + blocksDown = 0 + } + + var blockOffsets, blockCounts []uint + + if int(d.firstVal(tTileWidth)) != 0 { + blockPadding = true + + blockWidth = int(d.firstVal(tTileWidth)) + blockHeight = int(d.firstVal(tTileLength)) + + if blockWidth != 0 { + blocksAcross = (d.config.Width + blockWidth - 1) / blockWidth + } + if blockHeight != 0 { + blocksDown = (d.config.Height + blockHeight - 1) / blockHeight + } + + blockCounts = d.features[tTileByteCounts] + blockOffsets = d.features[tTileOffsets] + + } else { + if int(d.firstVal(tRowsPerStrip)) != 0 { + blockHeight = int(d.firstVal(tRowsPerStrip)) + } + + if blockHeight != 0 { + blocksDown = (d.config.Height + blockHeight - 1) / blockHeight + } + + blockOffsets = d.features[tStripOffsets] + blockCounts = d.features[tStripByteCounts] + } + + // Check if we have the right number of strips/tiles, offsets and counts. 
+ if n := blocksAcross * blocksDown; len(blockOffsets) < n || len(blockCounts) < n { + return nil, FormatError("inconsistent header") + } + + imgRect := image.Rect(0, 0, d.config.Width, d.config.Height) + switch d.mode { + case mGray, mGrayInvert: + if d.bpp == 16 { + img = image.NewGray16(imgRect) + } else { + img = image.NewGray(imgRect) + } + case mPaletted: + img = image.NewPaletted(imgRect, d.palette) + case mNRGBA: + if d.bpp == 16 { + img = image.NewNRGBA64(imgRect) + } else { + img = image.NewNRGBA(imgRect) + } + case mRGB, mRGBA: + if d.bpp == 16 { + img = image.NewRGBA64(imgRect) + } else { + img = image.NewRGBA(imgRect) + } + } + + for i := 0; i < blocksAcross; i++ { + blkW := blockWidth + if !blockPadding && i == blocksAcross-1 && d.config.Width%blockWidth != 0 { + blkW = d.config.Width % blockWidth + } + for j := 0; j < blocksDown; j++ { + blkH := blockHeight + if !blockPadding && j == blocksDown-1 && d.config.Height%blockHeight != 0 { + blkH = d.config.Height % blockHeight + } + offset := int64(blockOffsets[j*blocksAcross+i]) + n := int64(blockCounts[j*blocksAcross+i]) + switch d.firstVal(tCompression) { + + // According to the spec, Compression does not have a default value, + // but some tools interpret a missing Compression value as none so we do + // the same. + case cNone, 0: + if b, ok := d.r.(*buffer); ok { + d.buf, err = b.Slice(int(offset), int(n)) + } else { + d.buf = make([]byte, n) + _, err = d.r.ReadAt(d.buf, offset) + } + case cG3: + inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero + order := ccittFillOrder(d.firstVal(tFillOrder)) + r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group3, blkW, blkH, &ccitt.Options{Invert: inv, Align: false}) + d.buf, err = ioutil.ReadAll(r) + case cG4: + inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero + order := ccittFillOrder(d.firstVal(tFillOrder)) + r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group4, blkW, blkH, &ccitt.Options{Invert: inv, Align: false}) + d.buf, err = ioutil.ReadAll(r) + case cLZW: + r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8) + d.buf, err = ioutil.ReadAll(r) + r.Close() + case cDeflate, cDeflateOld: + var r io.ReadCloser + r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n)) + if err != nil { + return nil, err + } + d.buf, err = ioutil.ReadAll(r) + r.Close() + case cPackBits: + d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n)) + default: + err = UnsupportedError(fmt.Sprintf("compression value %d", d.firstVal(tCompression))) + } + if err != nil { + return nil, err + } + + xmin := i * blockWidth + ymin := j * blockHeight + xmax := xmin + blkW + ymax := ymin + blkH + err = d.decode(img, xmin, ymin, xmax, ymax) + if err != nil { + return nil, err + } + } + } + return +} + +func init() { + image.RegisterFormat("tiff", leHeader, Decode, DecodeConfig) + image.RegisterFormat("tiff", beHeader, Decode, DecodeConfig) +} diff --git a/vendor/golang.org/x/image/tiff/writer.go b/vendor/golang.org/x/image/tiff/writer.go new file mode 100644 index 0000000..c8a01ce --- /dev/null +++ b/vendor/golang.org/x/image/tiff/writer.go @@ -0,0 +1,438 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + "image" + "io" + "sort" +) + +// The TIFF format allows to choose the order of the different elements freely. 
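Once the module vendors this package, typical use looks like the sketch below (not part of the patch; "photo.tif" is a placeholder path): probe the dimensions with DecodeConfig, then decode the full image.

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	f, err := os.Open("photo.tif")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	cfg, err := tiff.DecodeConfig(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d x %d pixels\n", cfg.Width, cfg.Height)

	// Rewind before handing the same *os.File to Decode.
	if _, err := f.Seek(0, 0); err != nil {
		log.Fatal(err)
	}
	img, err := tiff.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(img.Bounds())
}

Because init above registers the format for both byte orders, importing this package (even blank-imported) also lets the generic image.Decode recognize TIFF data.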
+// The basic structure of a TIFF file written by this package is: +// +// 1. Header (8 bytes). +// 2. Image data. +// 3. Image File Directory (IFD). +// 4. "Pointer area" for larger entries in the IFD. + +// We only write little-endian TIFF files. +var enc = binary.LittleEndian + +// An ifdEntry is a single entry in an Image File Directory. +// A value of type dtRational is composed of two 32-bit values, +// thus data contains two uints (numerator and denominator) for a single number. +type ifdEntry struct { + tag int + datatype int + data []uint32 +} + +func (e ifdEntry) putData(p []byte) { + for _, d := range e.data { + switch e.datatype { + case dtByte, dtASCII: + p[0] = byte(d) + p = p[1:] + case dtShort: + enc.PutUint16(p, uint16(d)) + p = p[2:] + case dtLong, dtRational: + enc.PutUint32(p, uint32(d)) + p = p[4:] + } + } +} + +type byTag []ifdEntry + +func (d byTag) Len() int { return len(d) } +func (d byTag) Less(i, j int) bool { return d[i].tag < d[j].tag } +func (d byTag) Swap(i, j int) { d[i], d[j] = d[j], d[i] } + +func encodeGray(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + if !predictor { + return writePix(w, pix, dy, dx, stride) + } + buf := make([]byte, dx) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx + off := 0 + var v0 uint8 + for i := min; i < max; i++ { + v1 := pix[i] + buf[off] = v1 - v0 + v0 = v1 + off++ + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encodeGray16(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + buf := make([]byte, dx*2) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx*2 + off := 0 + var v0 uint16 + for i := min; i < max; i += 2 { + // An image.Gray16's Pix is in big-endian order. + v1 := uint16(pix[i])<<8 | uint16(pix[i+1]) + if predictor { + v0, v1 = v1, v1-v0 + } + // We only write little-endian TIFF files. + buf[off+0] = byte(v1) + buf[off+1] = byte(v1 >> 8) + off += 2 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + if !predictor { + return writePix(w, pix, dy, dx*4, stride) + } + buf := make([]byte, dx*4) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + var r0, g0, b0, a0 uint8 + for i := min; i < max; i += 4 { + r1, g1, b1, a1 := pix[i+0], pix[i+1], pix[i+2], pix[i+3] + buf[off+0] = r1 - r0 + buf[off+1] = g1 - g0 + buf[off+2] = b1 - b0 + buf[off+3] = a1 - a0 + off += 4 + r0, g0, b0, a0 = r1, g1, b1, a1 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encodeRGBA64(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + buf := make([]byte, dx*8) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx*8 + off := 0 + var r0, g0, b0, a0 uint16 + for i := min; i < max; i += 8 { + // An image.RGBA64's Pix is in big-endian order. + r1 := uint16(pix[i+0])<<8 | uint16(pix[i+1]) + g1 := uint16(pix[i+2])<<8 | uint16(pix[i+3]) + b1 := uint16(pix[i+4])<<8 | uint16(pix[i+5]) + a1 := uint16(pix[i+6])<<8 | uint16(pix[i+7]) + if predictor { + r0, r1 = r1, r1-r0 + g0, g1 = g1, g1-g0 + b0, b1 = b1, b1-b0 + a0, a1 = a1, a1-a0 + } + // We only write little-endian TIFF files. 
+ buf[off+0] = byte(r1) + buf[off+1] = byte(r1 >> 8) + buf[off+2] = byte(g1) + buf[off+3] = byte(g1 >> 8) + buf[off+4] = byte(b1) + buf[off+5] = byte(b1 >> 8) + buf[off+6] = byte(a1) + buf[off+7] = byte(a1 >> 8) + off += 8 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encode(w io.Writer, m image.Image, predictor bool) error { + bounds := m.Bounds() + buf := make([]byte, 4*bounds.Dx()) + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + off := 0 + if predictor { + var r0, g0, b0, a0 uint8 + for x := bounds.Min.X; x < bounds.Max.X; x++ { + r, g, b, a := m.At(x, y).RGBA() + r1 := uint8(r >> 8) + g1 := uint8(g >> 8) + b1 := uint8(b >> 8) + a1 := uint8(a >> 8) + buf[off+0] = r1 - r0 + buf[off+1] = g1 - g0 + buf[off+2] = b1 - b0 + buf[off+3] = a1 - a0 + off += 4 + r0, g0, b0, a0 = r1, g1, b1, a1 + } + } else { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + r, g, b, a := m.At(x, y).RGBA() + buf[off+0] = uint8(r >> 8) + buf[off+1] = uint8(g >> 8) + buf[off+2] = uint8(b >> 8) + buf[off+3] = uint8(a >> 8) + off += 4 + } + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +// writePix writes the internal byte array of an image to w. It is less general +// but much faster then encode. writePix is used when pix directly +// corresponds to one of the TIFF image types. +func writePix(w io.Writer, pix []byte, nrows, length, stride int) error { + if length == stride { + _, err := w.Write(pix[:nrows*length]) + return err + } + for ; nrows > 0; nrows-- { + if _, err := w.Write(pix[:length]); err != nil { + return err + } + pix = pix[stride:] + } + return nil +} + +func writeIFD(w io.Writer, ifdOffset int, d []ifdEntry) error { + var buf [ifdLen]byte + // Make space for "pointer area" containing IFD entry data + // longer than 4 bytes. + parea := make([]byte, 1024) + pstart := ifdOffset + ifdLen*len(d) + 6 + var o int // Current offset in parea. + + // The IFD has to be written with the tags in ascending order. + sort.Sort(byTag(d)) + + // Write the number of entries in this IFD. + if err := binary.Write(w, enc, uint16(len(d))); err != nil { + return err + } + for _, ent := range d { + enc.PutUint16(buf[0:2], uint16(ent.tag)) + enc.PutUint16(buf[2:4], uint16(ent.datatype)) + count := uint32(len(ent.data)) + if ent.datatype == dtRational { + count /= 2 + } + enc.PutUint32(buf[4:8], count) + datalen := int(count * lengths[ent.datatype]) + if datalen <= 4 { + ent.putData(buf[8:12]) + } else { + if (o + datalen) > len(parea) { + newlen := len(parea) + 1024 + for (o + datalen) > newlen { + newlen += 1024 + } + newarea := make([]byte, newlen) + copy(newarea, parea) + parea = newarea + } + ent.putData(parea[o : o+datalen]) + enc.PutUint32(buf[8:12], uint32(pstart+o)) + o += datalen + } + if _, err := w.Write(buf[:]); err != nil { + return err + } + } + // The IFD ends with the offset of the next IFD in the file, + // or zero if it is the last one (page 14). + if err := binary.Write(w, enc, uint32(0)); err != nil { + return err + } + _, err := w.Write(parea[:o]) + return err +} + +// Options are the encoding parameters. +type Options struct { + // Compression is the type of compression used. + Compression CompressionType + // Predictor determines whether a differencing predictor is used; + // if true, instead of each pixel's color, the color difference to the + // preceding one is saved. This improves the compression for certain + // types of images and compressors. For example, it works well for + // photos with Deflate compression. 
+ Predictor bool +} + +// Encode writes the image m to w. opt determines the options used for +// encoding, such as the compression type. If opt is nil, an uncompressed +// image is written. +func Encode(w io.Writer, m image.Image, opt *Options) error { + d := m.Bounds().Size() + + compression := uint32(cNone) + predictor := false + if opt != nil { + compression = opt.Compression.specValue() + // The predictor field is only used with LZW. See page 64 of the spec. + predictor = opt.Predictor && compression == cLZW + } + + _, err := io.WriteString(w, leHeader) + if err != nil { + return err + } + + // Compressed data is written into a buffer first, so that we + // know the compressed size. + var buf bytes.Buffer + // dst holds the destination for the pixel data of the image -- + // either w or a writer to buf. + var dst io.Writer + // imageLen is the length of the pixel data in bytes. + // The offset of the IFD is imageLen + 8 header bytes. + var imageLen int + + switch compression { + case cNone: + dst = w + // Write IFD offset before outputting pixel data. + switch m.(type) { + case *image.Paletted: + imageLen = d.X * d.Y * 1 + case *image.Gray: + imageLen = d.X * d.Y * 1 + case *image.Gray16: + imageLen = d.X * d.Y * 2 + case *image.RGBA64: + imageLen = d.X * d.Y * 8 + case *image.NRGBA64: + imageLen = d.X * d.Y * 8 + default: + imageLen = d.X * d.Y * 4 + } + err = binary.Write(w, enc, uint32(imageLen+8)) + if err != nil { + return err + } + case cDeflate: + dst = zlib.NewWriter(&buf) + } + + pr := uint32(prNone) + photometricInterpretation := uint32(pRGB) + samplesPerPixel := uint32(4) + bitsPerSample := []uint32{8, 8, 8, 8} + extraSamples := uint32(0) + colorMap := []uint32{} + + if predictor { + pr = prHorizontal + } + switch m := m.(type) { + case *image.Paletted: + photometricInterpretation = pPaletted + samplesPerPixel = 1 + bitsPerSample = []uint32{8} + colorMap = make([]uint32, 256*3) + for i := 0; i < 256 && i < len(m.Palette); i++ { + r, g, b, _ := m.Palette[i].RGBA() + colorMap[i+0*256] = uint32(r) + colorMap[i+1*256] = uint32(g) + colorMap[i+2*256] = uint32(b) + } + err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.Gray: + photometricInterpretation = pBlackIsZero + samplesPerPixel = 1 + bitsPerSample = []uint32{8} + err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.Gray16: + photometricInterpretation = pBlackIsZero + samplesPerPixel = 1 + bitsPerSample = []uint32{16} + err = encodeGray16(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.NRGBA: + extraSamples = 2 // Unassociated alpha. + err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.NRGBA64: + extraSamples = 2 // Unassociated alpha. + bitsPerSample = []uint32{16, 16, 16, 16} + err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.RGBA: + extraSamples = 1 // Associated alpha. + err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.RGBA64: + extraSamples = 1 // Associated alpha. + bitsPerSample = []uint32{16, 16, 16, 16} + err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + default: + extraSamples = 1 // Associated alpha. 
+ err = encode(dst, m, predictor) + } + if err != nil { + return err + } + + if compression != cNone { + if err = dst.(io.Closer).Close(); err != nil { + return err + } + imageLen = buf.Len() + if err = binary.Write(w, enc, uint32(imageLen+8)); err != nil { + return err + } + if _, err = buf.WriteTo(w); err != nil { + return err + } + } + + ifd := []ifdEntry{ + {tImageWidth, dtShort, []uint32{uint32(d.X)}}, + {tImageLength, dtShort, []uint32{uint32(d.Y)}}, + {tBitsPerSample, dtShort, bitsPerSample}, + {tCompression, dtShort, []uint32{compression}}, + {tPhotometricInterpretation, dtShort, []uint32{photometricInterpretation}}, + {tStripOffsets, dtLong, []uint32{8}}, + {tSamplesPerPixel, dtShort, []uint32{samplesPerPixel}}, + {tRowsPerStrip, dtShort, []uint32{uint32(d.Y)}}, + {tStripByteCounts, dtLong, []uint32{uint32(imageLen)}}, + // There is currently no support for storing the image + // resolution, so give a bogus value of 72x72 dpi. + {tXResolution, dtRational, []uint32{72, 1}}, + {tYResolution, dtRational, []uint32{72, 1}}, + {tResolutionUnit, dtShort, []uint32{resPerInch}}, + } + if pr != prNone { + ifd = append(ifd, ifdEntry{tPredictor, dtShort, []uint32{pr}}) + } + if len(colorMap) != 0 { + ifd = append(ifd, ifdEntry{tColorMap, dtShort, colorMap}) + } + if extraSamples > 0 { + ifd = append(ifd, ifdEntry{tExtraSamples, dtShort, []uint32{extraSamples}}) + } + + return writeIFD(w, imageLen+8, ifd) +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 5d05278..0000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go -//go:generate go run gen.go -test - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". -func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func genFile(name string, buf *bytes.Buffer) { - b, err := format.Source(buf.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile(name, b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) 
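The encoding direction, as a standalone sketch (not part of the patch; "out.tif" is a placeholder path): Encode above only wires up a destination writer for the Uncompressed and Deflate cases, so Deflate is the compressed option exercised here.

package main

import (
	"image"
	"image/color"
	"log"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	// A small gradient image so there is something to write.
	img := image.NewGray(image.Rect(0, 0, 64, 64))
	for y := 0; y < 64; y++ {
		for x := 0; x < 64; x++ {
			img.SetGray(x, y, color.Gray{Y: uint8(4 * x)})
		}
	}

	f, err := os.Create("out.tif")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := tiff.Encode(f, img, &tiff.Options{Compression: tiff.Deflate}); err != nil {
		log.Fatal(err)
	}
}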
- sort.Strings(all) - - // uniq - lists have dups - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - all[w] = s - w++ - } - } - all = all[:w] - - if *test { - var buf bytes.Buffer - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") - fmt.Fprintln(&buf, "package atom\n") - fmt.Fprintln(&buf, "var testAtomList = []string{") - for _, s := range all { - fmt.Fprintf(&buf, "\t%q,\n", s) - } - fmt.Fprintln(&buf, "}") - - genFile("table_test.go", &buf) - return - } - - // Find hash that minimizes table size. - var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - var buf bytes.Buffer - // Generate the Go code. 
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go\n") - fmt.Fprintln(&buf, "package atom\n\nconst (") - - // compute max len - maxLen := 0 - for _, s := range all { - if maxLen < len(s) { - maxLen = len(s) - } - fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Fprintln(&buf, ")\n") - - fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) - fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - - fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Fprintf(&buf, "}\n") - datasize := (1 << best.k) * 4 - - fmt.Fprintln(&buf, "const atomText =") - textsize := len(text) - for len(text) > 60 { - fmt.Fprintf(&buf, "\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Fprintf(&buf, "\t%q\n\n", text) - - genFile("table.go", &buf) - - fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. -type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. -func (t *table) hash(s string) (h1, h2 uint32) { - h := fnv(t.h0, s) - h1 = h & t.mask - h2 = (h >> 16) & t.mask - return -} - -// init initializes the table with the given parameters. -// h0 is the initial hash value, -// k is the number of bits of hash value to use, and -// x is the list of strings to store in the table. -// init returns false if the table cannot be constructed. -func (t *table) init(h0 uint32, k uint, x []string) bool { - t.h0 = h0 - t.k = k - t.tab = make([]string, 1< len(t.tab) { - return false - } - s := t.tab[i] - h1, h2 := t.hash(s) - j := h1 + h2 - i - if t.tab[j] != "" && !t.push(j, depth+1) { - return false - } - t.tab[j] = s - return true -} - -// The lists of element names and attribute keys were taken from -// https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 16 April 2018" version. - -// "command", "keygen" and "menuitem" have been removed from the spec, -// but are kept here for backwards compatibility. 
-var elements = []string{ - "a", - "abbr", - "address", - "area", - "article", - "aside", - "audio", - "b", - "base", - "bdi", - "bdo", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "cite", - "code", - "col", - "colgroup", - "command", - "data", - "datalist", - "dd", - "del", - "details", - "dfn", - "dialog", - "div", - "dl", - "dt", - "em", - "embed", - "fieldset", - "figcaption", - "figure", - "footer", - "form", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "hgroup", - "hr", - "html", - "i", - "iframe", - "img", - "input", - "ins", - "kbd", - "keygen", - "label", - "legend", - "li", - "link", - "main", - "map", - "mark", - "menu", - "menuitem", - "meta", - "meter", - "nav", - "noscript", - "object", - "ol", - "optgroup", - "option", - "output", - "p", - "param", - "picture", - "pre", - "progress", - "q", - "rp", - "rt", - "ruby", - "s", - "samp", - "script", - "section", - "select", - "slot", - "small", - "source", - "span", - "strong", - "style", - "sub", - "summary", - "sup", - "table", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "tr", - "track", - "u", - "ul", - "var", - "video", - "wbr", -} - -// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 -// -// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", -// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, -// but are kept here for backwards compatibility. -var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "allowfullscreen", - "allowpaymentrequest", - "allowusermedia", - "alt", - "as", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "color", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "integrity", - "is", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "nomodule", - "nonce", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "playsinline", - "poster", - "preload", - "radiogroup", - "readonly", - "referrerpolicy", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "slot", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "srcset", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "updateviacache", - "usemap", - "value", - "width", - "workertype", - "wrap", -} - -// "onautocomplete", "onautocompleteerror", "onmousewheel", -// "onshow" and "onsort" have been removed from the spec, -// but are kept here for backwards compatibility. 
-var eventHandlers = []string{
-	"onabort",
-	"onautocomplete",
-	"onautocompleteerror",
-	"onauxclick",
-	"onafterprint",
-	"onbeforeprint",
-	"onbeforeunload",
-	"onblur",
-	"oncancel",
-	"oncanplay",
-	"oncanplaythrough",
-	"onchange",
-	"onclick",
-	"onclose",
-	"oncontextmenu",
-	"oncopy",
-	"oncuechange",
-	"oncut",
-	"ondblclick",
-	"ondrag",
-	"ondragend",
-	"ondragenter",
-	"ondragexit",
-	"ondragleave",
-	"ondragover",
-	"ondragstart",
-	"ondrop",
-	"ondurationchange",
-	"onemptied",
-	"onended",
-	"onerror",
-	"onfocus",
-	"onhashchange",
-	"oninput",
-	"oninvalid",
-	"onkeydown",
-	"onkeypress",
-	"onkeyup",
-	"onlanguagechange",
-	"onload",
-	"onloadeddata",
-	"onloadedmetadata",
-	"onloadend",
-	"onloadstart",
-	"onmessage",
-	"onmessageerror",
-	"onmousedown",
-	"onmouseenter",
-	"onmouseleave",
-	"onmousemove",
-	"onmouseout",
-	"onmouseover",
-	"onmouseup",
-	"onmousewheel",
-	"onwheel",
-	"onoffline",
-	"ononline",
-	"onpagehide",
-	"onpageshow",
-	"onpaste",
-	"onpause",
-	"onplay",
-	"onplaying",
-	"onpopstate",
-	"onprogress",
-	"onratechange",
-	"onreset",
-	"onresize",
-	"onrejectionhandled",
-	"onscroll",
-	"onsecuritypolicyviolation",
-	"onseeked",
-	"onseeking",
-	"onselect",
-	"onshow",
-	"onsort",
-	"onstalled",
-	"onstorage",
-	"onsubmit",
-	"onsuspend",
-	"ontimeupdate",
-	"ontoggle",
-	"onunhandledrejection",
-	"onunload",
-	"onvolumechange",
-	"onwaiting",
-}
-
-// extra are ad-hoc values not covered by any of the lists above.
-var extra = []string{
-	"acronym",
-	"align",
-	"annotation",
-	"annotation-xml",
-	"applet",
-	"basefont",
-	"bgsound",
-	"big",
-	"blink",
-	"center",
-	"color",
-	"desc",
-	"face",
-	"font",
-	"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
-	"foreignobject",
-	"frame",
-	"frameset",
-	"image",
-	"isindex",
-	"listing",
-	"malignmark",
-	"marquee",
-	"math",
-	"mglyph",
-	"mi",
-	"mn",
-	"mo",
-	"ms",
-	"mtext",
-	"nobr",
-	"noembed",
-	"noframes",
-	"plaintext",
-	"prompt",
-	"public",
-	"rb",
-	"rtc",
-	"spacer",
-	"strike",
-	"svg",
-	"system",
-	"tt",
-	"xmp",
-}
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
index a3a918f..ff7acf2 100644
--- a/vendor/golang.org/x/net/html/const.go
+++ b/vendor/golang.org/x/net/html/const.go
@@ -52,8 +52,7 @@ var isSpecialElementMap = map[string]bool{
 	"iframe":  true,
 	"img":     true,
 	"input":   true,
-	"isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
-	"keygen":  true,
+	"keygen":  true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
"li": true, "link": true, "listing": true, diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go index 01477a9..9da9e9d 100644 --- a/vendor/golang.org/x/net/html/foreign.go +++ b/vendor/golang.org/x/net/html/foreign.go @@ -161,66 +161,62 @@ var mathMLAttributeAdjustments = map[string]string{ } var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": 
"pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", } diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 633ee15..1350eef 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -18,6 +18,11 @@ const ( ElementNode CommentNode DoctypeNode + // RawNode nodes are not returned by the parser, but can be part of the + // Node tree passed to func Render to insert raw HTML (without escaping). + // If so, this package makes no guarantee that the rendered HTML is secure + // (from e.g. Cross Site Scripting attacks) or well-formed. + RawNode scopeMarkerNode ) diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 992cff2..f91466f 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -184,6 +184,17 @@ func (p *parser) clearStackToContext(s scope) { } } +// parseGenericRawTextElements implements the generic raw text element parsing +// algorithm defined in 12.2.6.2. +// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text +// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part +// officially, need to make tokenizer consider both states. +func (p *parser) parseGenericRawTextElement() { + p.addElement() + p.originalIM = p.im + p.im = textIM +} + // generateImpliedEndTags pops nodes off the stack of open elements as long as // the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. // If exceptions are specified, nodes with that name will not be popped off. @@ -192,16 +203,17 @@ func (p *parser) generateImpliedEndTags(exceptions ...string) { loop: for i = len(p.oe) - 1; i >= 0; i-- { n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: - for _, except := range exceptions { - if n.Data == except { - break loop - } + if n.Type != ElementNode { + break + } + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: + for _, except := range exceptions { + if n.Data == except { + break loop } - continue } + continue } break } @@ -369,8 +381,7 @@ findIdenticalElements: // Section 12.2.4.3. 
 func (p *parser) clearActiveFormattingElements() {
 	for {
-		n := p.afe.pop()
-		if len(p.afe) == 0 || n.Type == scopeMarkerNode {
+		if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode {
 			return
 		}
 	}
@@ -625,25 +636,29 @@ func inHeadIM(p *parser) bool {
 		switch p.tok.DataAtom {
 		case a.Html:
 			return inBodyIM(p)
-		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta:
+		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta:
 			p.addElement()
 			p.oe.pop()
 			p.acknowledgeSelfClosingTag()
 			return true
 		case a.Noscript:
-			p.addElement()
 			if p.scripting {
-				p.setOriginalIM()
-				p.im = textIM
-			} else {
-				p.im = inHeadNoscriptIM
+				p.parseGenericRawTextElement()
+				return true
 			}
+			p.addElement()
+			p.im = inHeadNoscriptIM
+			// Don't let the tokenizer go into raw text mode when scripting is disabled.
+			p.tokenizer.NextIsNotRawText()
 			return true
-		case a.Script, a.Title, a.Noframes, a.Style:
+		case a.Script, a.Title:
 			p.addElement()
 			p.setOriginalIM()
 			p.im = textIM
 			return true
+		case a.Noframes, a.Style:
+			p.parseGenericRawTextElement()
+			return true
 		case a.Head:
 			// Ignore the token.
 			return true
@@ -713,7 +728,13 @@ func inHeadNoscriptIM(p *parser) bool {
 		return inBodyIM(p)
 	case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style:
 		return inHeadIM(p)
-	case a.Head, a.Noscript:
+	case a.Head:
+		// Ignore the token.
+		return true
+	case a.Noscript:
+		// Don't let the tokenizer go into raw text mode even when a
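// Illustrative sketch, not part of this patch: the node.go hunk above adds
// RawNode, which func Render writes out verbatim (no escaping). A minimal,
// hypothetical use of the vendored golang.org/x/net/html package:
//
//	div := &html.Node{Type: html.ElementNode, DataAtom: atom.Div, Data: "div"}
//	div.AppendChild(&html.Node{Type: html.RawNode, Data: "<b>trusted markup</b>"})
//	err := html.Render(os.Stdout, div) // <div><b>trusted markup</b></div>
//
// Because RawNode content is emitted as-is, the caller is responsible for
// making sure it is well-formed and safe (e.g. against cross-site scripting).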