From 9a70635b61dfcf9b8ea5b070de1e0d9f7e102742 Mon Sep 17 00:00:00 2001 From: ianmuchyri Date: Mon, 6 Nov 2023 16:56:02 +0300 Subject: [PATCH 1/6] update mainflux to magistrala Signed-off-by: ianmuchyri --- Makefile | 14 +- README.md | 26 +- cmd/ui/main.go | 34 +- docker/.env | 22 +- docker/Dockerfile.dev | 2 +- docker/README.md | 2 +- docker/docker-compose.yml | 32 +- go.mod | 36 +- go.sum | 76 +- package-lock.json | 100 +- ui/README.md | 22 +- ui/api/doc.go | 2 +- ui/api/endpoint.go | 6 +- ui/api/logging.go | 13 +- ui/api/metrics.go | 11 +- ui/api/requests.go | 6 +- ui/api/responses.go | 10 +- ui/api/transport.go | 16 +- ui/service.go | 180 +- .../magistrala}/.dockerignore | 0 .../magistrala}/.gitignore | 4 +- .../magistrala}/.golangci.yml | 2 +- .../magistrala}/ADOPTERS.md | 12 +- .../magistrala}/CHANGELOG.md | 30 +- .../magistrala}/CONTRIBUTING.md | 26 +- .../agent => absmach/magistrala}/LICENSE | 2 +- .../magistrala}/MAINTAINERS | 30 +- .../mainflux => absmach/magistrala}/Makefile | 73 +- .../mainflux => absmach/magistrala}/README.md | 68 +- .../mainflux => absmach/magistrala}/api.go | 4 +- .../magistrala}/auth.pb.go | 542 +- .../magistrala}/auth.proto | 6 +- .../magistrala}/auth_grpc.pb.go | 36 +- .../magistrala}/config.toml | 0 vendor/github.com/absmach/magistrala/doc.go | 6 + .../mainflux => absmach/magistrala}/health.go | 10 +- .../magistrala}/internal/apiutil/errors.go | 4 +- .../magistrala}/internal/apiutil/responses.go | 2 +- .../magistrala}/internal/apiutil/token.go | 2 +- .../magistrala}/internal/apiutil/transport.go | 6 +- .../magistrala}/logger/doc.go | 2 +- .../magistrala}/logger/exit.go | 2 +- .../magistrala}/logger/level.go | 2 +- .../magistrala}/logger/logger.go | 2 +- .../magistrala}/logger/mock.go | 2 +- .../magistrala}/pkg/errors/README.md | 0 .../absmach/magistrala/pkg/errors/doc.go | 5 + .../magistrala}/pkg/errors/errors.go | 4 +- .../magistrala}/pkg/errors/sdk_errors.go | 4 +- .../magistrala}/pkg/errors/types.go | 2 +- 
.../magistrala}/pkg/messaging/README.md | 0 .../magistrala}/pkg/messaging/message.pb.go | 4 +- .../magistrala}/pkg/messaging/message.proto | 4 +- .../magistrala/pkg/messaging/pubsub.go | 80 + .../absmach/magistrala/pkg/sdk/go/README.md | 83 + .../magistrala}/pkg/sdk/go/bootstrap.go | 34 +- .../magistrala}/pkg/sdk/go/certs.go | 12 +- .../magistrala}/pkg/sdk/go/channels.go | 48 +- .../magistrala}/pkg/sdk/go/consumers.go | 12 +- .../absmach/magistrala/pkg/sdk/go/doc.go | 5 + .../magistrala}/pkg/sdk/go/groups.go | 34 +- .../magistrala}/pkg/sdk/go/health.go | 6 +- .../magistrala}/pkg/sdk/go/message.go | 12 +- .../magistrala}/pkg/sdk/go/metadata.go | 2 +- .../magistrala}/pkg/sdk/go/requests.go | 2 +- .../magistrala}/pkg/sdk/go/responses.go | 4 +- .../magistrala}/pkg/sdk/go/sdk.go | 20 +- .../magistrala}/pkg/sdk/go/things.go | 42 +- .../magistrala}/pkg/sdk/go/tokens.go | 8 +- .../magistrala}/pkg/sdk/go/users.go | 48 +- .../magistrala/pkg/transformers/README.md | 10 + .../magistrala}/pkg/transformers/doc.go | 4 +- .../pkg/transformers/senml/README.md | 4 + .../magistrala}/pkg/transformers/senml/doc.go | 2 +- .../pkg/transformers/senml/message.go | 0 .../pkg/transformers/senml/transformer.go | 8 +- .../pkg/transformers/transformer.go | 6 +- .../magistrala}/pkg/uuid/README.md | 0 .../magistrala}/pkg/uuid/doc.go | 2 +- .../magistrala}/pkg/uuid/mock.go | 8 +- .../magistrala}/pkg/uuid/uuid.go | 10 +- .../mainflux => absmach/magistrala}/uuid.go | 4 +- vendor/github.com/creack/pty/.gitignore | 4 - .../github.com/creack/pty/Dockerfile.golang | 17 - vendor/github.com/creack/pty/Dockerfile.riscv | 23 - vendor/github.com/creack/pty/LICENSE | 23 - vendor/github.com/creack/pty/README.md | 107 - .../github.com/creack/pty/asm_solaris_amd64.s | 18 - vendor/github.com/creack/pty/doc.go | 16 - vendor/github.com/creack/pty/ioctl.go | 19 - vendor/github.com/creack/pty/ioctl_bsd.go | 40 - vendor/github.com/creack/pty/ioctl_solaris.go | 48 - .../creack/pty/ioctl_unsupported.go | 13 - 
vendor/github.com/creack/pty/mktypes.bash | 19 - vendor/github.com/creack/pty/pty_darwin.go | 68 - vendor/github.com/creack/pty/pty_dragonfly.go | 83 - vendor/github.com/creack/pty/pty_freebsd.go | 81 - vendor/github.com/creack/pty/pty_linux.go | 54 - vendor/github.com/creack/pty/pty_netbsd.go | 69 - vendor/github.com/creack/pty/pty_openbsd.go | 36 - vendor/github.com/creack/pty/pty_solaris.go | 152 - .../github.com/creack/pty/pty_unsupported.go | 12 - vendor/github.com/creack/pty/run.go | 57 - vendor/github.com/creack/pty/start.go | 25 - vendor/github.com/creack/pty/start_windows.go | 19 - .../creack/pty/test_crosscompile.sh | 64 - vendor/github.com/creack/pty/winsize.go | 27 - vendor/github.com/creack/pty/winsize_unix.go | 35 - .../creack/pty/winsize_unsupported.go | 23 - vendor/github.com/creack/pty/ztypes_386.go | 12 - vendor/github.com/creack/pty/ztypes_amd64.go | 12 - vendor/github.com/creack/pty/ztypes_arm.go | 12 - vendor/github.com/creack/pty/ztypes_arm64.go | 12 - .../creack/pty/ztypes_dragonfly_amd64.go | 17 - .../creack/pty/ztypes_freebsd_386.go | 16 - .../creack/pty/ztypes_freebsd_amd64.go | 17 - .../creack/pty/ztypes_freebsd_arm.go | 16 - .../creack/pty/ztypes_freebsd_arm64.go | 16 - .../creack/pty/ztypes_freebsd_ppc64.go | 14 - .../github.com/creack/pty/ztypes_loong64.go | 12 - vendor/github.com/creack/pty/ztypes_mipsx.go | 13 - .../creack/pty/ztypes_netbsd_32bit_int.go | 17 - .../creack/pty/ztypes_openbsd_32bit_int.go | 14 - vendor/github.com/creack/pty/ztypes_ppc64.go | 12 - .../github.com/creack/pty/ztypes_ppc64le.go | 12 - vendor/github.com/creack/pty/ztypes_riscvx.go | 12 - vendor/github.com/creack/pty/ztypes_s390x.go | 12 - .../eclipse/paho.mqtt.golang/.gitignore | 36 - .../eclipse/paho.mqtt.golang/CONTRIBUTING.md | 56 - .../eclipse/paho.mqtt.golang/LICENSE | 294 - .../eclipse/paho.mqtt.golang/NOTICE.md | 77 - .../eclipse/paho.mqtt.golang/README.md | 198 - .../eclipse/paho.mqtt.golang/backoff.go | 104 - .../eclipse/paho.mqtt.golang/client.go | 
1240 ---- .../eclipse/paho.mqtt.golang/components.go | 36 - .../eclipse/paho.mqtt.golang/edl-v10 | 15 - .../eclipse/paho.mqtt.golang/epl-v20 | 277 - .../eclipse/paho.mqtt.golang/filestore.go | 261 - .../eclipse/paho.mqtt.golang/memstore.go | 142 - .../paho.mqtt.golang/memstore_ordered.go | 166 - .../eclipse/paho.mqtt.golang/message.go | 131 - .../eclipse/paho.mqtt.golang/messageids.go | 200 - .../eclipse/paho.mqtt.golang/net.go | 470 -- .../eclipse/paho.mqtt.golang/netconn.go | 110 - .../eclipse/paho.mqtt.golang/oops.go | 25 - .../eclipse/paho.mqtt.golang/options.go | 457 -- .../paho.mqtt.golang/options_reader.go | 171 - .../paho.mqtt.golang/packets/connack.go | 68 - .../paho.mqtt.golang/packets/connect.go | 171 - .../paho.mqtt.golang/packets/disconnect.go | 50 - .../paho.mqtt.golang/packets/packets.go | 372 -- .../paho.mqtt.golang/packets/pingreq.go | 50 - .../paho.mqtt.golang/packets/pingresp.go | 50 - .../paho.mqtt.golang/packets/puback.go | 58 - .../paho.mqtt.golang/packets/pubcomp.go | 58 - .../paho.mqtt.golang/packets/publish.go | 99 - .../paho.mqtt.golang/packets/pubrec.go | 58 - .../paho.mqtt.golang/packets/pubrel.go | 58 - .../paho.mqtt.golang/packets/suback.go | 73 - .../paho.mqtt.golang/packets/subscribe.go | 85 - .../paho.mqtt.golang/packets/unsuback.go | 58 - .../paho.mqtt.golang/packets/unsubscribe.go | 72 - .../eclipse/paho.mqtt.golang/ping.go | 78 - .../eclipse/paho.mqtt.golang/router.go | 239 - .../eclipse/paho.mqtt.golang/status.go | 296 - .../eclipse/paho.mqtt.golang/store.go | 140 - .../eclipse/paho.mqtt.golang/token.go | 204 - .../eclipse/paho.mqtt.golang/topic.go | 90 - .../eclipse/paho.mqtt.golang/trace.go | 44 - .../eclipse/paho.mqtt.golang/websocket.go | 132 - .../go-mod-core-contracts/LICENSE | 202 - .../clients/constants.go | 81 - .../go-mod-core-contracts/clients/context.go | 29 - .../go-mod-core-contracts/clients/doc.go | 26 - .../clients/interfaces/url.go | 26 - .../clients/interfaces/urlstream.go | 17 - 
.../go-mod-core-contracts/clients/request.go | 353 - .../clients/types/errors.go | 51 - .../go-mod-core-contracts/models/action.go | 35 - .../models/actiontype.go | 30 - .../models/addressable.go | 120 - .../models/adminstate.go | 69 - .../go-mod-core-contracts/models/autoevent.go | 43 - .../models/callbackalert.go | 37 - .../go-mod-core-contracts/models/category.go | 55 - .../go-mod-core-contracts/models/channel.go | 35 - .../models/channel_type.go | 53 - .../go-mod-core-contracts/models/command.go | 129 - .../models/commandresponse.go | 73 - .../go-mod-core-contracts/models/constants.go | 28 - .../models/describedobject.go | 34 - .../go-mod-core-contracts/models/device.go | 170 - .../models/deviceprofile.go | 114 - .../models/devicereport.go | 40 - .../models/deviceresource.go | 66 - .../models/deviceservice.go | 129 - .../models/encryptiondetails.go | 29 - .../go-mod-core-contracts/models/errors.go | 32 - .../go-mod-core-contracts/models/event.go | 108 - .../go-mod-core-contracts/models/filter.go | 21 - .../go-mod-core-contracts/models/get.go | 44 - .../go-mod-core-contracts/models/interval.go | 168 - .../models/interval_action.go | 153 - .../go-mod-core-contracts/models/log_entry.go | 86 - .../models/notifications.go | 169 - .../models/operatingstate.go | 71 - .../models/profileproperty.go | 54 - .../models/profileresource.go | 32 - .../models/propertyvalue.go | 53 - .../models/provisionwatcher.go | 135 - .../go-mod-core-contracts/models/put.go | 42 - .../go-mod-core-contracts/models/reading.go | 178 - .../models/resourceoperation.go | 151 - .../go-mod-core-contracts/models/response.go | 54 - .../go-mod-core-contracts/models/severity.go | 51 - .../models/sma_operation.go | 64 - .../go-mod-core-contracts/models/status.go | 52 - .../models/subscription.go | 41 - .../models/timestamps.go | 44 - .../models/transmission.go | 147 - .../models/transmission_record.go | 35 - .../models/transmission_status.go | 62 - .../go-mod-core-contracts/models/units.go | 32 - 
.../go-mod-core-contracts/models/validator.go | 53 - .../models/value-descriptor.go | 164 - vendor/github.com/google/uuid/CHANGELOG.md | 10 - vendor/github.com/google/uuid/CONTRIBUTING.md | 26 - vendor/github.com/google/uuid/CONTRIBUTORS | 9 - vendor/github.com/google/uuid/LICENSE | 27 - vendor/github.com/google/uuid/README.md | 21 - vendor/github.com/google/uuid/dce.go | 80 - vendor/github.com/google/uuid/doc.go | 12 - vendor/github.com/google/uuid/hash.go | 53 - vendor/github.com/google/uuid/marshal.go | 38 - vendor/github.com/google/uuid/node.go | 90 - vendor/github.com/google/uuid/node_js.go | 12 - vendor/github.com/google/uuid/node_net.go | 33 - vendor/github.com/google/uuid/null.go | 118 - vendor/github.com/google/uuid/sql.go | 59 - vendor/github.com/google/uuid/time.go | 123 - vendor/github.com/google/uuid/util.go | 43 - vendor/github.com/google/uuid/uuid.go | 296 - vendor/github.com/google/uuid/version1.go | 44 - vendor/github.com/google/uuid/version4.go | 76 - .../github.com/gorilla/websocket/.gitignore | 25 - vendor/github.com/gorilla/websocket/AUTHORS | 9 - vendor/github.com/gorilla/websocket/LICENSE | 22 - vendor/github.com/gorilla/websocket/README.md | 39 - vendor/github.com/gorilla/websocket/client.go | 422 -- .../gorilla/websocket/compression.go | 148 - vendor/github.com/gorilla/websocket/conn.go | 1230 ---- vendor/github.com/gorilla/websocket/doc.go | 227 - vendor/github.com/gorilla/websocket/join.go | 42 - vendor/github.com/gorilla/websocket/json.go | 60 - vendor/github.com/gorilla/websocket/mask.go | 55 - .../github.com/gorilla/websocket/mask_safe.go | 16 - .../github.com/gorilla/websocket/prepared.go | 102 - vendor/github.com/gorilla/websocket/proxy.go | 77 - vendor/github.com/gorilla/websocket/server.go | 365 -- .../gorilla/websocket/tls_handshake.go | 21 - .../gorilla/websocket/tls_handshake_116.go | 21 - vendor/github.com/gorilla/websocket/util.go | 283 - .../gorilla/websocket/x_net_proxy.go | 473 -- 
vendor/github.com/klauspost/compress/LICENSE | 304 - .../klauspost/compress/flate/deflate.go | 1017 --- .../klauspost/compress/flate/dict_decoder.go | 184 - .../klauspost/compress/flate/fast_encoder.go | 193 - .../compress/flate/huffman_bit_writer.go | 1182 ---- .../klauspost/compress/flate/huffman_code.go | 417 -- .../compress/flate/huffman_sortByFreq.go | 159 - .../compress/flate/huffman_sortByLiteral.go | 201 - .../klauspost/compress/flate/inflate.go | 793 --- .../klauspost/compress/flate/inflate_gen.go | 1283 ---- .../klauspost/compress/flate/level1.go | 241 - .../klauspost/compress/flate/level2.go | 214 - .../klauspost/compress/flate/level3.go | 241 - .../klauspost/compress/flate/level4.go | 221 - .../klauspost/compress/flate/level5.go | 708 -- .../klauspost/compress/flate/level6.go | 325 - .../compress/flate/matchlen_amd64.go | 16 - .../klauspost/compress/flate/matchlen_amd64.s | 68 - .../compress/flate/matchlen_generic.go | 33 - .../klauspost/compress/flate/regmask_amd64.go | 37 - .../klauspost/compress/flate/regmask_other.go | 40 - .../klauspost/compress/flate/stateless.go | 318 - .../klauspost/compress/flate/token.go | 379 -- .../mainflux/agent/pkg/agent/config.go | 161 - .../mainflux/agent/pkg/agent/heartbeat.go | 79 - .../mainflux/agent/pkg/agent/service.go | 405 -- .../mainflux/agent/pkg/bootstrap/bootstrap.go | 228 - .../mainflux/agent/pkg/edgex/client.go | 125 - .../mainflux/agent/pkg/encoder/encoder.go | 26 - .../mainflux/agent/pkg/terminal/terminal.go | 126 - .../mainflux/export/pkg/config/config.go | 111 - vendor/github.com/mainflux/mainflux/LICENSE | 191 - .../mainflux/mainflux/bootstrap/README.md | 125 - .../mainflux/mainflux/bootstrap/configs.go | 116 - .../mainflux/mainflux/bootstrap/doc.go | 6 - .../mainflux/mainflux/bootstrap/reader.go | 95 - .../mainflux/mainflux/bootstrap/service.go | 486 -- .../mainflux/mainflux/bootstrap/state.go | 26 - vendor/github.com/mainflux/mainflux/doc.go | 6 - .../mainflux/mainflux/pkg/clients/clients.go | 169 - 
.../mainflux/mainflux/pkg/clients/doc.go | 6 - .../mainflux/mainflux/pkg/clients/errors.go | 17 - .../mainflux/mainflux/pkg/clients/page.go | 21 - .../mainflux/mainflux/pkg/clients/roles.go | 60 - .../mainflux/mainflux/pkg/clients/status.go | 80 - .../mainflux/mainflux/pkg/clients/types.go | 7 - .../mainflux/mainflux/pkg/errors/doc.go | 5 - .../mainflux/mainflux/pkg/messaging/pubsub.go | 43 - .../mainflux/mainflux/pkg/sdk/go/README.md | 82 - .../mainflux/mainflux/pkg/sdk/go/doc.go | 5 - .../mainflux/pkg/transformers/README.md | 10 - .../mainflux/pkg/transformers/senml/README.md | 4 - .../{ => v2}/LICENSE | 0 .../{ => v2}/NOTICE | 0 .../{ => v2}/pbutil/.gitignore | 0 .../{ => v2}/pbutil/Makefile | 0 .../{ => v2}/pbutil/decode.go | 16 +- .../{ => v2}/pbutil/doc.go | 0 .../{ => v2}/pbutil/encode.go | 5 +- vendor/github.com/nats-io/nats.go/.gitignore | 45 - .../github.com/nats-io/nats.go/.golangci.yaml | 13 - vendor/github.com/nats-io/nats.go/.travis.yml | 36 - vendor/github.com/nats-io/nats.go/.words | 106 - .../github.com/nats-io/nats.go/.words.readme | 25 - .../nats-io/nats.go/CODE-OF-CONDUCT.md | 3 - .../github.com/nats-io/nats.go/GOVERNANCE.md | 3 - vendor/github.com/nats-io/nats.go/LICENSE | 201 - .../github.com/nats-io/nats.go/MAINTAINERS.md | 8 - vendor/github.com/nats-io/nats.go/README.md | 480 -- vendor/github.com/nats-io/nats.go/context.go | 244 - .../nats-io/nats.go/dependencies.md | 13 - vendor/github.com/nats-io/nats.go/enc.go | 269 - .../nats.go/encoders/builtin/default_enc.go | 117 - .../nats.go/encoders/builtin/gob_enc.go | 45 - .../nats.go/encoders/builtin/json_enc.go | 56 - vendor/github.com/nats-io/nats.go/go_test.mod | 22 - vendor/github.com/nats-io/nats.go/go_test.sum | 48 - .../nats-io/nats.go/internal/parser/parse.go | 104 - vendor/github.com/nats-io/nats.go/js.go | 3812 ----------- vendor/github.com/nats-io/nats.go/jserrors.go | 235 - vendor/github.com/nats-io/nats.go/jsm.go | 1665 ----- vendor/github.com/nats-io/nats.go/kv.go | 1119 ---- 
.../nats-io/nats.go/legacy_jetstream.md | 83 - vendor/github.com/nats-io/nats.go/nats.go | 5673 ----------------- vendor/github.com/nats-io/nats.go/netchan.go | 111 - vendor/github.com/nats-io/nats.go/object.go | 1386 ---- vendor/github.com/nats-io/nats.go/parser.go | 554 -- vendor/github.com/nats-io/nats.go/rand.go | 29 - vendor/github.com/nats-io/nats.go/timer.go | 56 - vendor/github.com/nats-io/nats.go/util/tls.go | 28 - .../nats-io/nats.go/util/tls_go17.go | 50 - vendor/github.com/nats-io/nats.go/ws.go | 780 --- vendor/github.com/nats-io/nkeys/.gitignore | 16 - .../github.com/nats-io/nkeys/.goreleaser.yml | 63 - vendor/github.com/nats-io/nkeys/GOVERNANCE.md | 3 - vendor/github.com/nats-io/nkeys/LICENSE | 201 - .../github.com/nats-io/nkeys/MAINTAINERS.md | 8 - vendor/github.com/nats-io/nkeys/README.md | 69 - vendor/github.com/nats-io/nkeys/TODO.md | 5 - vendor/github.com/nats-io/nkeys/crc16.go | 68 - .../github.com/nats-io/nkeys/creds_utils.go | 78 - .../github.com/nats-io/nkeys/dependencies.md | 12 - vendor/github.com/nats-io/nkeys/errors.go | 50 - vendor/github.com/nats-io/nkeys/keypair.go | 146 - vendor/github.com/nats-io/nkeys/nkeys.go | 100 - vendor/github.com/nats-io/nkeys/public.go | 86 - vendor/github.com/nats-io/nkeys/strkey.go | 314 - vendor/github.com/nats-io/nkeys/xkeys.go | 184 - vendor/github.com/nats-io/nuid/.gitignore | 24 - vendor/github.com/nats-io/nuid/.travis.yml | 17 - vendor/github.com/nats-io/nuid/GOVERNANCE.md | 3 - vendor/github.com/nats-io/nuid/LICENSE | 201 - vendor/github.com/nats-io/nuid/MAINTAINERS.md | 6 - vendor/github.com/nats-io/nuid/README.md | 47 - vendor/github.com/nats-io/nuid/nuid.go | 135 - .../pelletier/go-toml/.dockerignore | 2 - .../github.com/pelletier/go-toml/.gitignore | 5 - .../pelletier/go-toml/CONTRIBUTING.md | 132 - .../github.com/pelletier/go-toml/Dockerfile | 11 - vendor/github.com/pelletier/go-toml/LICENSE | 247 - vendor/github.com/pelletier/go-toml/Makefile | 29 - .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 - 
vendor/github.com/pelletier/go-toml/README.md | 176 - .../github.com/pelletier/go-toml/SECURITY.md | 19 - .../pelletier/go-toml/azure-pipelines.yml | 188 - .../github.com/pelletier/go-toml/benchmark.sh | 35 - vendor/github.com/pelletier/go-toml/doc.go | 23 - .../pelletier/go-toml/example-crlf.toml | 30 - .../github.com/pelletier/go-toml/example.toml | 30 - vendor/github.com/pelletier/go-toml/fuzz.go | 31 - vendor/github.com/pelletier/go-toml/fuzz.sh | 15 - .../pelletier/go-toml/keysparsing.go | 112 - vendor/github.com/pelletier/go-toml/lexer.go | 1031 --- .../github.com/pelletier/go-toml/localtime.go | 287 - .../github.com/pelletier/go-toml/marshal.go | 1308 ---- .../go-toml/marshal_OrderPreserve_test.toml | 39 - .../pelletier/go-toml/marshal_test.toml | 39 - vendor/github.com/pelletier/go-toml/parser.go | 507 -- .../github.com/pelletier/go-toml/position.go | 29 - vendor/github.com/pelletier/go-toml/token.go | 136 - vendor/github.com/pelletier/go-toml/toml.go | 533 -- .../github.com/pelletier/go-toml/tomlpub.go | 71 - .../pelletier/go-toml/tomltree_create.go | 155 - .../pelletier/go-toml/tomltree_write.go | 552 -- .../pelletier/go-toml/tomltree_writepub.go | 6 - .../prometheus/common/expfmt/decode.go | 2 +- .../prometheus/common/expfmt/encode.go | 2 +- vendor/golang.org/x/crypto/LICENSE | 27 - vendor/golang.org/x/crypto/PATENTS | 22 - vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 - .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 --- .../x/crypto/blake2b/blake2b_amd64.go | 25 - .../x/crypto/blake2b/blake2b_amd64.s | 279 - .../x/crypto/blake2b/blake2b_generic.go | 182 - .../x/crypto/blake2b/blake2b_ref.go | 12 - vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 - .../golang.org/x/crypto/blake2b/register.go | 33 - .../x/crypto/curve25519/curve25519.go | 59 - .../x/crypto/curve25519/curve25519_compat.go | 105 - .../x/crypto/curve25519/curve25519_go120.go | 46 - .../x/crypto/curve25519/internal/field/README | 7 - 
.../x/crypto/curve25519/internal/field/fe.go | 416 -- .../curve25519/internal/field/fe_amd64.go | 16 - .../curve25519/internal/field/fe_amd64.s | 379 -- .../internal/field/fe_amd64_noasm.go | 12 - .../curve25519/internal/field/fe_arm64.go | 16 - .../curve25519/internal/field/fe_arm64.s | 43 - .../internal/field/fe_arm64_noasm.go | 12 - .../curve25519/internal/field/fe_generic.go | 264 - .../curve25519/internal/field/sync.checkpoint | 1 - .../crypto/curve25519/internal/field/sync.sh | 19 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 - .../x/crypto/internal/alias/alias.go | 32 - .../x/crypto/internal/alias/alias_purego.go | 35 - .../x/crypto/internal/poly1305/bits_compat.go | 40 - .../x/crypto/internal/poly1305/bits_go1.13.go | 22 - .../x/crypto/internal/poly1305/mac_noasm.go | 10 - .../x/crypto/internal/poly1305/poly1305.go | 99 - .../x/crypto/internal/poly1305/sum_amd64.go | 48 - .../x/crypto/internal/poly1305/sum_amd64.s | 109 - .../x/crypto/internal/poly1305/sum_generic.go | 309 - .../x/crypto/internal/poly1305/sum_ppc64le.go | 48 - .../x/crypto/internal/poly1305/sum_ppc64le.s | 182 - .../x/crypto/internal/poly1305/sum_s390x.go | 77 - .../x/crypto/internal/poly1305/sum_s390x.s | 504 -- vendor/golang.org/x/crypto/nacl/box/box.go | 182 - .../x/crypto/nacl/secretbox/secretbox.go | 173 - .../x/crypto/salsa20/salsa/hsalsa20.go | 146 - .../x/crypto/salsa20/salsa/salsa208.go | 201 - .../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 - .../x/crypto/salsa20/salsa/salsa20_amd64.s | 881 --- .../x/crypto/salsa20/salsa/salsa20_noasm.go | 15 - .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 - .../golang.org/x/net/internal/socks/client.go | 168 - .../golang.org/x/net/internal/socks/socks.go | 317 - vendor/golang.org/x/net/proxy/dial.go | 54 - vendor/golang.org/x/net/proxy/direct.go | 31 - vendor/golang.org/x/net/proxy/per_host.go | 155 - vendor/golang.org/x/net/proxy/proxy.go | 149 - vendor/golang.org/x/net/proxy/socks5.go | 42 - 
.../golang.org/x/sync/semaphore/semaphore.go | 136 - vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 18 - vendor/golang.org/x/sys/cpu/byteorder.go | 66 - vendor/golang.org/x/sys/cpu/cpu.go | 290 - vendor/golang.org/x/sys/cpu/cpu_aix.go | 34 - vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 - vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 - vendor/golang.org/x/sys/cpu/cpu_arm64.s | 32 - vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 - vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 22 - vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 17 - .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 - .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 23 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 39 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 33 - vendor/golang.org/x/sys/cpu/cpu_linux.go | 16 - vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 - .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 111 - .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 24 - .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 10 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 32 - .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 - vendor/golang.org/x/sys/cpu/cpu_loong64.go | 13 - vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 16 - vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 12 - .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 - .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 - .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 - vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 - .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 - .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 15 - .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 12 - vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 - vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 - vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 - vendor/golang.org/x/sys/cpu/cpu_s390x.s | 58 - vendor/golang.org/x/sys/cpu/cpu_wasm.go | 18 - vendor/golang.org/x/sys/cpu/cpu_x86.go | 152 - vendor/golang.org/x/sys/cpu/cpu_x86.s | 28 - 
vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 - vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 - vendor/golang.org/x/sys/cpu/endian_big.go | 11 - vendor/golang.org/x/sys/cpu/endian_little.go | 11 - vendor/golang.org/x/sys/cpu/hwcap_linux.go | 71 - vendor/golang.org/x/sys/cpu/parse.go | 43 - .../x/sys/cpu/proc_cpuinfo_linux.go | 54 - vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 - .../x/sys/cpu/runtime_auxv_go121.go | 19 - .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 - .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 - vendor/google.golang.org/grpc/README.md | 2 +- .../grpc/attributes/attributes.go | 4 +- .../grpc/balancer/balancer.go | 15 + vendor/google.golang.org/grpc/clientconn.go | 18 +- vendor/google.golang.org/grpc/dialoptions.go | 5 +- .../grpc/encoding/encoding.go | 13 +- .../grpc/internal/backoff/backoff.go | 36 + .../grpc/internal/internal.go | 6 + .../grpc/internal/status/status.go | 28 + .../grpc/internal/transport/handler_server.go | 13 +- .../grpc/internal/transport/http2_client.go | 13 +- .../grpc/internal/transport/http2_server.go | 14 +- .../grpc/internal/transport/http_util.go | 18 +- .../grpc/internal/transport/transport.go | 2 +- vendor/google.golang.org/grpc/server.go | 136 +- vendor/google.golang.org/grpc/tap/tap.go | 6 + vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 3 + vendor/modules.txt | 98 +- 534 files changed, 1305 insertions(+), 65213 deletions(-) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/.dockerignore (100%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/.gitignore (78%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/.golangci.yml (97%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/ADOPTERS.md (65%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/CHANGELOG.md (97%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/CONTRIBUTING.md (81%) rename 
vendor/github.com/{mainflux/agent => absmach/magistrala}/LICENSE (99%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/MAINTAINERS (55%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/Makefile (71%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/README.md (66%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/api.go (87%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/auth.pb.go (69%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/auth.proto (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/auth_grpc.pb.go (94%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/config.toml (100%) create mode 100644 vendor/github.com/absmach/magistrala/doc.go rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/health.go (88%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/internal/apiutil/errors.go (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/internal/apiutil/responses.go (87%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/internal/apiutil/token.go (97%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/internal/apiutil/transport.go (96%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/logger/doc.go (84%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/logger/exit.go (86%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/logger/level.go (97%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/logger/logger.go (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/logger/mock.go (93%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/errors/README.md (100%) create mode 100644 vendor/github.com/absmach/magistrala/pkg/errors/doc.go rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/errors/errors.go (97%) 
rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/errors/sdk_errors.go (96%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/errors/types.go (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/messaging/README.md (100%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/messaging/message.pb.go (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/messaging/message.proto (75%) create mode 100644 vendor/github.com/absmach/magistrala/pkg/messaging/pubsub.go create mode 100644 vendor/github.com/absmach/magistrala/pkg/sdk/go/README.md rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/bootstrap.go (86%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/certs.go (86%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/channels.go (82%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/consumers.go (85%) create mode 100644 vendor/github.com/absmach/magistrala/pkg/sdk/go/doc.go rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/groups.go (83%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/health.go (91%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/message.go (82%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/metadata.go (76%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/requests.go (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/responses.go (95%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/sdk.go (98%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/things.go (85%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/sdk/go/tokens.go (86%) rename vendor/github.com/{mainflux/mainflux 
=> absmach/magistrala}/pkg/sdk/go/users.go (83%) create mode 100644 vendor/github.com/absmach/magistrala/pkg/transformers/README.md rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/transformers/doc.go (61%) create mode 100644 vendor/github.com/absmach/magistrala/pkg/transformers/senml/README.md rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/transformers/senml/doc.go (77%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/transformers/senml/message.go (100%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/transformers/senml/transformer.go (91%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/transformers/transformer.go (60%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/uuid/README.md (100%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/uuid/doc.go (77%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/uuid/mock.go (78%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/pkg/uuid/uuid.go (73%) rename vendor/github.com/{mainflux/mainflux => absmach/magistrala}/uuid.go (80%) delete mode 100644 vendor/github.com/creack/pty/.gitignore delete mode 100644 vendor/github.com/creack/pty/Dockerfile.golang delete mode 100644 vendor/github.com/creack/pty/Dockerfile.riscv delete mode 100644 vendor/github.com/creack/pty/LICENSE delete mode 100644 vendor/github.com/creack/pty/README.md delete mode 100644 vendor/github.com/creack/pty/asm_solaris_amd64.s delete mode 100644 vendor/github.com/creack/pty/doc.go delete mode 100644 vendor/github.com/creack/pty/ioctl.go delete mode 100644 vendor/github.com/creack/pty/ioctl_bsd.go delete mode 100644 vendor/github.com/creack/pty/ioctl_solaris.go delete mode 100644 vendor/github.com/creack/pty/ioctl_unsupported.go delete mode 100644 vendor/github.com/creack/pty/mktypes.bash delete mode 100644 vendor/github.com/creack/pty/pty_darwin.go delete 
mode 100644 vendor/github.com/creack/pty/pty_dragonfly.go delete mode 100644 vendor/github.com/creack/pty/pty_freebsd.go delete mode 100644 vendor/github.com/creack/pty/pty_linux.go delete mode 100644 vendor/github.com/creack/pty/pty_netbsd.go delete mode 100644 vendor/github.com/creack/pty/pty_openbsd.go delete mode 100644 vendor/github.com/creack/pty/pty_solaris.go delete mode 100644 vendor/github.com/creack/pty/pty_unsupported.go delete mode 100644 vendor/github.com/creack/pty/run.go delete mode 100644 vendor/github.com/creack/pty/start.go delete mode 100644 vendor/github.com/creack/pty/start_windows.go delete mode 100644 vendor/github.com/creack/pty/test_crosscompile.sh delete mode 100644 vendor/github.com/creack/pty/winsize.go delete mode 100644 vendor/github.com/creack/pty/winsize_unix.go delete mode 100644 vendor/github.com/creack/pty/winsize_unsupported.go delete mode 100644 vendor/github.com/creack/pty/ztypes_386.go delete mode 100644 vendor/github.com/creack/pty/ztypes_amd64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_arm.go delete mode 100644 vendor/github.com/creack/pty/ztypes_arm64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_freebsd_386.go delete mode 100644 vendor/github.com/creack/pty/ztypes_freebsd_amd64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_freebsd_arm.go delete mode 100644 vendor/github.com/creack/pty/ztypes_freebsd_arm64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_freebsd_ppc64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_loong64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_mipsx.go delete mode 100644 vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go delete mode 100644 vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go delete mode 100644 vendor/github.com/creack/pty/ztypes_ppc64.go delete mode 100644 vendor/github.com/creack/pty/ztypes_ppc64le.go delete mode 100644 
vendor/github.com/creack/pty/ztypes_riscvx.go delete mode 100644 vendor/github.com/creack/pty/ztypes_s390x.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/.gitignore delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/LICENSE delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/NOTICE.md delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/README.md delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/backoff.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/client.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/components.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/epl-v20 delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/filestore.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/memstore.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/memstore_ordered.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/message.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/messageids.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/net.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/netconn.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/oops.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/options.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go delete 
mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/ping.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/router.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/status.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/store.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/token.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/topic.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/trace.go delete mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/websocket.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/LICENSE delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/constants.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/context.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/doc.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/url.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/urlstream.go delete mode 100644 
vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/request.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/types/errors.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/action.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/actiontype.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/addressable.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/adminstate.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/autoevent.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/callbackalert.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/category.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel_type.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/command.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/commandresponse.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/constants.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/describedobject.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/device.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceprofile.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/devicereport.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceresource.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceservice.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/encryptiondetails.go delete mode 100644 
vendor/github.com/edgexfoundry/go-mod-core-contracts/models/errors.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/event.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/filter.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/get.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval_action.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/log_entry.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/notifications.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/operatingstate.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileproperty.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileresource.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/propertyvalue.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/provisionwatcher.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/put.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/reading.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/resourceoperation.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/response.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/severity.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/sma_operation.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/status.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/subscription.go delete mode 100644 
vendor/github.com/edgexfoundry/go-mod-core-contracts/models/timestamps.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_record.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_status.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/units.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/validator.go delete mode 100644 vendor/github.com/edgexfoundry/go-mod-core-contracts/models/value-descriptor.go delete mode 100644 vendor/github.com/google/uuid/CHANGELOG.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/google/uuid/LICENSE delete mode 100644 vendor/github.com/google/uuid/README.md delete mode 100644 vendor/github.com/google/uuid/dce.go delete mode 100644 vendor/github.com/google/uuid/doc.go delete mode 100644 vendor/github.com/google/uuid/hash.go delete mode 100644 vendor/github.com/google/uuid/marshal.go delete mode 100644 vendor/github.com/google/uuid/node.go delete mode 100644 vendor/github.com/google/uuid/node_js.go delete mode 100644 vendor/github.com/google/uuid/node_net.go delete mode 100644 vendor/github.com/google/uuid/null.go delete mode 100644 vendor/github.com/google/uuid/sql.go delete mode 100644 vendor/github.com/google/uuid/time.go delete mode 100644 vendor/github.com/google/uuid/util.go delete mode 100644 vendor/github.com/google/uuid/uuid.go delete mode 100644 vendor/github.com/google/uuid/version1.go delete mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/gorilla/websocket/.gitignore delete mode 100644 vendor/github.com/gorilla/websocket/AUTHORS delete mode 100644 vendor/github.com/gorilla/websocket/LICENSE delete mode 100644 
vendor/github.com/gorilla/websocket/README.md delete mode 100644 vendor/github.com/gorilla/websocket/client.go delete mode 100644 vendor/github.com/gorilla/websocket/compression.go delete mode 100644 vendor/github.com/gorilla/websocket/conn.go delete mode 100644 vendor/github.com/gorilla/websocket/doc.go delete mode 100644 vendor/github.com/gorilla/websocket/join.go delete mode 100644 vendor/github.com/gorilla/websocket/json.go delete mode 100644 vendor/github.com/gorilla/websocket/mask.go delete mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go delete mode 100644 vendor/github.com/gorilla/websocket/prepared.go delete mode 100644 vendor/github.com/gorilla/websocket/proxy.go delete mode 100644 vendor/github.com/gorilla/websocket/server.go delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go delete mode 100644 vendor/github.com/gorilla/websocket/util.go delete mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go delete mode 100644 vendor/github.com/klauspost/compress/LICENSE delete mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go delete mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go delete mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go delete mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go delete mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level1.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level2.go delete mode 100644 
vendor/github.com/klauspost/compress/flate/level3.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level4.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level5.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level6.go delete mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go delete mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go delete mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go delete mode 100644 vendor/github.com/klauspost/compress/flate/token.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/agent/config.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/agent/heartbeat.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/agent/service.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/bootstrap/bootstrap.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/edgex/client.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/encoder/encoder.go delete mode 100644 vendor/github.com/mainflux/agent/pkg/terminal/terminal.go delete mode 100644 vendor/github.com/mainflux/export/pkg/config/config.go delete mode 100644 vendor/github.com/mainflux/mainflux/LICENSE delete mode 100644 vendor/github.com/mainflux/mainflux/bootstrap/README.md delete mode 100644 vendor/github.com/mainflux/mainflux/bootstrap/configs.go delete mode 100644 vendor/github.com/mainflux/mainflux/bootstrap/doc.go delete mode 100644 vendor/github.com/mainflux/mainflux/bootstrap/reader.go delete mode 100644 vendor/github.com/mainflux/mainflux/bootstrap/service.go delete mode 100644 vendor/github.com/mainflux/mainflux/bootstrap/state.go delete mode 100644 vendor/github.com/mainflux/mainflux/doc.go delete mode 
100644 vendor/github.com/mainflux/mainflux/pkg/clients/clients.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/clients/doc.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/clients/errors.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/clients/page.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/clients/roles.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/clients/status.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/clients/types.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/errors/doc.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/messaging/pubsub.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/sdk/go/README.md delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/sdk/go/doc.go delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/transformers/README.md delete mode 100644 vendor/github.com/mainflux/mainflux/pkg/transformers/senml/README.md rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/LICENSE (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/NOTICE (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/.gitignore (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/Makefile (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/decode.go (83%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/doc.go (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/encode.go (91%) delete mode 100644 vendor/github.com/nats-io/nats.go/.gitignore delete mode 100644 vendor/github.com/nats-io/nats.go/.golangci.yaml delete mode 100644 vendor/github.com/nats-io/nats.go/.travis.yml delete mode 100644 vendor/github.com/nats-io/nats.go/.words delete mode 100644 vendor/github.com/nats-io/nats.go/.words.readme delete mode 100644 
vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md delete mode 100644 vendor/github.com/nats-io/nats.go/GOVERNANCE.md delete mode 100644 vendor/github.com/nats-io/nats.go/LICENSE delete mode 100644 vendor/github.com/nats-io/nats.go/MAINTAINERS.md delete mode 100644 vendor/github.com/nats-io/nats.go/README.md delete mode 100644 vendor/github.com/nats-io/nats.go/context.go delete mode 100644 vendor/github.com/nats-io/nats.go/dependencies.md delete mode 100644 vendor/github.com/nats-io/nats.go/enc.go delete mode 100644 vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go delete mode 100644 vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go delete mode 100644 vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go delete mode 100644 vendor/github.com/nats-io/nats.go/go_test.mod delete mode 100644 vendor/github.com/nats-io/nats.go/go_test.sum delete mode 100644 vendor/github.com/nats-io/nats.go/internal/parser/parse.go delete mode 100644 vendor/github.com/nats-io/nats.go/js.go delete mode 100644 vendor/github.com/nats-io/nats.go/jserrors.go delete mode 100644 vendor/github.com/nats-io/nats.go/jsm.go delete mode 100644 vendor/github.com/nats-io/nats.go/kv.go delete mode 100644 vendor/github.com/nats-io/nats.go/legacy_jetstream.md delete mode 100644 vendor/github.com/nats-io/nats.go/nats.go delete mode 100644 vendor/github.com/nats-io/nats.go/netchan.go delete mode 100644 vendor/github.com/nats-io/nats.go/object.go delete mode 100644 vendor/github.com/nats-io/nats.go/parser.go delete mode 100644 vendor/github.com/nats-io/nats.go/rand.go delete mode 100644 vendor/github.com/nats-io/nats.go/timer.go delete mode 100644 vendor/github.com/nats-io/nats.go/util/tls.go delete mode 100644 vendor/github.com/nats-io/nats.go/util/tls_go17.go delete mode 100644 vendor/github.com/nats-io/nats.go/ws.go delete mode 100644 vendor/github.com/nats-io/nkeys/.gitignore delete mode 100644 vendor/github.com/nats-io/nkeys/.goreleaser.yml delete mode 100644 
vendor/github.com/nats-io/nkeys/GOVERNANCE.md delete mode 100644 vendor/github.com/nats-io/nkeys/LICENSE delete mode 100644 vendor/github.com/nats-io/nkeys/MAINTAINERS.md delete mode 100644 vendor/github.com/nats-io/nkeys/README.md delete mode 100644 vendor/github.com/nats-io/nkeys/TODO.md delete mode 100644 vendor/github.com/nats-io/nkeys/crc16.go delete mode 100644 vendor/github.com/nats-io/nkeys/creds_utils.go delete mode 100644 vendor/github.com/nats-io/nkeys/dependencies.md delete mode 100644 vendor/github.com/nats-io/nkeys/errors.go delete mode 100644 vendor/github.com/nats-io/nkeys/keypair.go delete mode 100644 vendor/github.com/nats-io/nkeys/nkeys.go delete mode 100644 vendor/github.com/nats-io/nkeys/public.go delete mode 100644 vendor/github.com/nats-io/nkeys/strkey.go delete mode 100644 vendor/github.com/nats-io/nkeys/xkeys.go delete mode 100644 vendor/github.com/nats-io/nuid/.gitignore delete mode 100644 vendor/github.com/nats-io/nuid/.travis.yml delete mode 100644 vendor/github.com/nats-io/nuid/GOVERNANCE.md delete mode 100644 vendor/github.com/nats-io/nuid/LICENSE delete mode 100644 vendor/github.com/nats-io/nuid/MAINTAINERS.md delete mode 100644 vendor/github.com/nats-io/nuid/README.md delete mode 100644 vendor/github.com/nats-io/nuid/nuid.go delete mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore delete mode 100644 vendor/github.com/pelletier/go-toml/.gitignore delete mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md delete mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile delete mode 100644 vendor/github.com/pelletier/go-toml/LICENSE delete mode 100644 vendor/github.com/pelletier/go-toml/Makefile delete mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md delete mode 100644 vendor/github.com/pelletier/go-toml/README.md delete mode 100644 vendor/github.com/pelletier/go-toml/SECURITY.md delete mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml delete mode 100644 
vendor/github.com/pelletier/go-toml/benchmark.sh delete mode 100644 vendor/github.com/pelletier/go-toml/doc.go delete mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml delete mode 100644 vendor/github.com/pelletier/go-toml/example.toml delete mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go delete mode 100644 vendor/github.com/pelletier/go-toml/fuzz.sh delete mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go delete mode 100644 vendor/github.com/pelletier/go-toml/lexer.go delete mode 100644 vendor/github.com/pelletier/go-toml/localtime.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml delete mode 100644 vendor/github.com/pelletier/go-toml/marshal_test.toml delete mode 100644 vendor/github.com/pelletier/go-toml/parser.go delete mode 100644 vendor/github.com/pelletier/go-toml/position.go delete mode 100644 vendor/github.com/pelletier/go-toml/token.go delete mode 100644 vendor/github.com/pelletier/go-toml/toml.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomlpub.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_writepub.go delete mode 100644 vendor/golang.org/x/crypto/LICENSE delete mode 100644 vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 
vendor/golang.org/x/crypto/blake2b/blake2x.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/register.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_compat.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_go120.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/crypto/internal/alias/alias.go delete mode 100644 vendor/golang.org/x/crypto/internal/alias/alias_purego.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/poly1305.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s delete mode 100644 
vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s delete mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go delete mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go delete mode 100644 vendor/golang.org/x/net/internal/socks/client.go delete mode 100644 vendor/golang.org/x/net/internal/socks/socks.go delete mode 100644 vendor/golang.org/x/net/proxy/dial.go delete mode 100644 vendor/golang.org/x/net/proxy/direct.go delete mode 100644 vendor/golang.org/x/net/proxy/per_host.go delete mode 100644 vendor/golang.org/x/net/proxy/proxy.go delete mode 100644 vendor/golang.org/x/net/proxy/socks5.go delete mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go delete mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s delete mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go delete 
mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go delete mode 100644 
vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go delete mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go delete mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go delete mode 100644 vendor/golang.org/x/sys/cpu/parse.go delete mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go delete mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go delete mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go delete mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go delete mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go diff --git a/Makefile b/Makefile index 7c96ae13..d3592104 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -MF_DOCKER_IMAGE_NAME_PREFIX ?= mainflux +MF_DOCKER_IMAGE_NAME_PREFIX ?= magistrala SVC = ui BUILD_DIR = build CGO_ENABLED ?= 0 @@ -10,11 +10,11 @@ TIME ?= $(shell date +%F_%T) define compile_service CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) \ - go build -mod=vendor -ldflags "-s -w \ - -X 'github.com/mainflux/mainflux.BuildTime=$(TIME)' \ - -X 'github.com/mainflux/mainflux.Version=$(VERSION)' \ - -X 'github.com/mainflux/mainflux.Commit=$(COMMIT)'" \ - -o ${BUILD_DIR}/mainflux-$(1) cmd/$(1)/main.go + go build -ldflags "-s -w \ + -X 'github.com/absmach/magistrala-ui.BuildTime=$(TIME)' \ + -X 'github.com/absmach/magistrala-ui.Version=$(VERSION)' \ + -X 'github.com/absmach/magistrala-ui.Commit=$(COMMIT)'" \ + -o ${BUILD_DIR}/magistrala-$(1) cmd/$(1)/main.go endef define make_docker @@ -76,4 +76,4 @@ run_docker: docker-compose -f docker/docker-compose.yml --env-file docker/.env up run: - ${BUILD_DIR}/mainflux-ui + ${BUILD_DIR}/magistrala-ui diff --git a/README.md b/README.md index 866e9709..44cdf25d 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,14 @@ -# Mainflux-ui +# Magistrala-ui -The Mainflux-ui functions as a Graphical User Interface (GUI) designed to interact with Mainflux services, encompassing both the 
creation and management aspects of users, things, channels, and groups. It streamlines tasks such as user and thing creation, channel establishment, policy configuration, and HTTP message transmission across various channels. +The Magistrala-ui functions as a Graphical User Interface (GUI) designed to interact with Magistrala services, encompassing both the creation and management aspects of users, things, channels, and groups. It streamlines tasks such as user and thing creation, channel establishment, policy configuration, and HTTP message transmission across various channels. -Mainflux-ui can be obtained as an independent subset of Mainflux; however, it requires integration with the mainflux core services to function properly. +Magistrala-ui can be obtained as an independent subset of Magistrala; however, it requires integration with the Magistrala core services to function properly. ## Prerequisites -To run Mainflux-ui, you need the following components: +To run Magistrala-ui, you need the following components: -- [Mainflux](https://github.com/mainflux/mainflux) (latest version) +- [Magistrala](https://github.com/absmach/magistrala) (latest version) - [Go](https://golang.org/doc/install) (version 1.19.2) - [Docker-compose](https://docs.docker.com/compose/install/) (latest version) - [make](https://www.gnu.org/software/make/manual/make.html) @@ -25,7 +25,7 @@ make make run ``` -These commands will launch Mainflux-ui. To use the Mainflux-ui, ensure that the Mainflux core services are operational. You can achieve this by installing [Mainflux](https://github.com/mainflux/mainflux) and its prerequisites. +These commands will launch Magistrala-ui. To use the Magistrala-ui, ensure that the Magistrala core services are operational. You can achieve this by installing [Magistrala](https://github.com/absmach/magistrala) and its prerequisites.
To build the docker images for the ui service, run the following commands which will build the docker images in different configurations. @@ -51,7 +51,7 @@ This brings up the docker images and runs ui in the configuration specified in t ## Usage -Once both Mainflux core and Mainflux-ui are running, you can access the Mainflux-ui interface locally by entering the address: [http://localhost:9090](http://localhost:9090). +Once both Magistrala core and Magistrala-ui are running, you can access the Magistrala-ui interface locally by entering the address: [http://localhost:9095](http://localhost:9095). On the login page, use the provided credentials to log in to the interface: @@ -60,7 +60,7 @@ Email: admin@example.com Password: 12345678 ``` -Upon logging in, you will be directed to the Dashboard, which provides an overview of the Mainflux user interface. The sidebar elements, such as Users/Groups, allow you to navigate to specific pages for performing actions related to Users, Groups, Users Policies, Things, Channels, Things Policies, and viewing Deleted Clients. +Upon logging in, you will be directed to the Dashboard, which provides an overview of the Magistrala user interface. The sidebar elements, such as Users/Groups, allow you to navigate to specific pages for performing actions related to Users, Groups, Things, Channels, and Bootstraps. ### Users @@ -76,10 +76,6 @@ When using a CSV file to create multiple groups, the file should contain group n For more details, refer to the official [Documentation](http://docs.mainflux.io/cli/#things-management). -### Users Policies - -To create a user policy, select the subject and object, and choose the relevant actions (multiple selections are allowed). User policies are utilized to manage permissions for users and groups entities. These Policies determine access rights for these entities. For instance, they define which users can access specific groups. 
Learn more about policies from the official [Documentation](https://docs.mainflux.io/authorization/#summary-of-defined-policies). - ### Things You can create individual things or upload a CSV file for multiple things. When adding a thing, provide the Thing Name (required), Thing ID, Identity, Secret, Tags (as a string slice), and Metadata (in JSON format). The Thing Secret should be unique and is used to identify the thing. Metadata offers additional information about the thing. @@ -90,13 +86,9 @@ For multiple things, use a CSV file with thing names in one column. Refer to the Similarly, you can add individual or multiple channels using a CSV file. For channel creation, enter the Channel Name (required), select the Parent ID, provide a Description, and include Metadata (in JSON format). -### Things Policies - -Creating a thing policy involves selecting the subject and object and specifying the actions (multiple selections are allowed). Things policies control permissions for things and channels entities. They define access permissions, such as a thing's access to a channel. For detailed policy information, consult the official [Documentation](https://docs.mainflux.io/authorization/#summary-of-defined-policies). - ### Bootstrap -To use bootstrap, ensure that the [bootstrap](http://docs.mainflux.io/bootstrap/) addon is active as part of the Mainflux core services. +To use bootstrap, ensure that the [bootstrap](http://docs.mainflux.io/bootstrap/) addon is active as part of the Magistrala core services. To configure bootstrap, provide the Name, Thing ID, External ID, External Key, Channel (as a string slice), Content (in JSON format), Client Cert, Client Key, and CA Cert. 
diff --git a/cmd/ui/main.go b/cmd/ui/main.go index 8236ce8b..51308959 100644 --- a/cmd/ui/main.go +++ b/cmd/ui/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 package main @@ -11,29 +11,29 @@ import ( "os/signal" "syscall" + "github.com/absmach/magistrala-ui/ui" + "github.com/absmach/magistrala-ui/ui/api" + "github.com/absmach/magistrala/logger" + sdk "github.com/absmach/magistrala/pkg/sdk/go" + "github.com/absmach/magistrala/pkg/uuid" "github.com/caarlos0/env/v9" "github.com/go-chi/chi/v5" kitprometheus "github.com/go-kit/kit/metrics/prometheus" - "github.com/mainflux/mainflux/logger" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/pkg/uuid" stdprometheus "github.com/prometheus/client_golang/prometheus" - "github.com/ultravioletrs/mainflux-ui/ui" - "github.com/ultravioletrs/mainflux-ui/ui/api" ) type config struct { - LogLevel string `env:"MF_UI_LOG_LEVEL" envDefault:"info"` - Port string `env:"MF_UI_PORT" envDefault:"9095"` - InstanceID string `env:"MF_UI_INSTANCE_ID" envDefault:""` - HTTPAdapterURL string `env:"MF_HTTP_ADAPTER_URL" envDefault:"http://localhost:8008"` - ReaderURL string `env:"MF_READER_URL" envDefault:"http://localhost:9007"` - ThingsURL string `env:"MF_THINGS_URL" envDefault:"http://localhost:9000"` - UsersURL string `env:"MF_USERS_URL" envDefault:"http://localhost:9002"` - HostURL string `env:"MF_UI_HOST_URL" envDefault:"http://localhost:9095"` - BootstrapURL string `env:"MF_BOOTSTRAP_URL" envDefault:"http://localhost:9013"` - MsgContentType sdk.ContentType `env:"MF_CONTENT-TYPE" envDefault:"application/senml+json"` - TLSVerification bool `env:"MF_VERIFICATION_TLS" envDefault:"false"` + LogLevel string `env:"MG_UI_LOG_LEVEL" envDefault:"info"` + Port string `env:"MG_UI_PORT" envDefault:"9095"` + InstanceID string `env:"MG_UI_INSTANCE_ID" envDefault:""` + HTTPAdapterURL string `env:"MG_HTTP_ADAPTER_URL" 
envDefault:"http://localhost:8008"` + ReaderURL string `env:"MG_READER_URL" envDefault:"http://localhost:9007"` + ThingsURL string `env:"MG_THINGS_URL" envDefault:"http://localhost:9000"` + UsersURL string `env:"MG_USERS_URL" envDefault:"http://localhost:9002"` + HostURL string `env:"MG_UI_HOST_URL" envDefault:"http://localhost:9095"` + BootstrapURL string `env:"MG_BOOTSTRAP_URL" envDefault:"http://localhost:9013"` + MsgContentType sdk.ContentType `env:"MG_CONTENT-TYPE" envDefault:"application/senml+json"` + TLSVerification bool `env:"MG_VERIFICATION_TLS" envDefault:"false"` } func main() { diff --git a/docker/.env b/docker/.env index e501a910..857be82f 100644 --- a/docker/.env +++ b/docker/.env @@ -1,16 +1,16 @@ # Docker: Environment variables in Compose ## UI -MF_UI_LOG_LEVEL=debug -MF_UI_PORT=9095 -MF_HTTP_ADAPTER_URL=http://localhost:8008 -MF_READER_URL=http://localhost:9007 -MF_THINGS_URL=http://localhost:9000 -MF_USERS_URL=http://localhost:9002 -MF_VERIFICATION_TLS=false -MF_BOOTSTRAP_URL=http://localhost:9013 -MF_UI_INSTANCE_ID= -MF_UI_HOST_URL=http://localhost:9095 +MG_UI_LOG_LEVEL=debug +MG_UI_PORT=9095 +MG_HTTP_ADAPTER_URL=http://localhost:8008 +MG_READER_URL=http://localhost:9007 +MG_THINGS_URL=http://localhost:9000 +MG_USERS_URL=http://localhost:9002 +MG_VERIFICATION_TLS=false +MG_BOOTSTRAP_URL=http://localhost:9013 +MG_UI_INSTANCE_ID= +MG_UI_HOST_URL=http://localhost:9095 # Docker image tag -MF_RELEASE_TAG=latest +MG_RELEASE_TAG=latest diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index 46e04c97..454be1ba 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -1,5 +1,5 @@ FROM scratch ARG SVC -COPY ./build/mainflux-$SVC /exe +COPY ./build/magistrala-$SVC /exe COPY ./$SVC/web /$SVC/web ENTRYPOINT ["/exe"] diff --git a/docker/README.md b/docker/README.md index 4b569895..3c297d91 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,6 +1,6 @@ # Docker Composition -Configure environment variables and run Mainflux Docker 
Composition. +Configure environment variables and run Magistrala UI Docker Composition. \*Note\*\*: `docker-compose` uses `.env` file to set all environment variables. Ensure that you run the command from the same location as .env file. diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index f5e7005e..09ec988d 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,29 +1,29 @@ -# Copyright (c) Mainflux +# Copyright (c) Magistrala # SPDX-License-Identifier: Apache-2.0 version: "3.7" networks: - mainflux-base-net: + magistrala-base-net: driver: bridge services: ui: - image: mainflux/ui:${MF_RELEASE_TAG} - container_name: mainflux-ui + image: magistrala/ui:${MG_RELEASE_TAG} + container_name: magistrala-ui restart: on-failure ports: - - ${MF_UI_PORT}:${MF_UI_PORT} + - ${MG_UI_PORT}:${MG_UI_PORT} networks: - - mainflux-base-net + - magistrala-base-net environment: - MF_UI_LOG_LEVEL: ${MF_UI_LOG_LEVEL} - MF_UI_PORT: ${MF_UI_PORT} - MF_HTTP_ADAPTER_URL: ${MF_HTTP_ADAPTER_URL} - MF_READER_URL: ${MF_READER_URL} - MF_THINGS_URL: ${MF_THINGS_URL} - MF_USERS_URL: ${MF_USERS_URL} - MF_BOOTSTRAP_URL: ${MF_BOOTSTRAP_URL} - MF_VERIFICATION_TLS: ${MF_VERIFICATION_TLS} - MF_UI_INSTANCE_ID: ${MF_UI_INSTANCE_ID} - MF_UI_HOST_URL: ${MF_UI_HOST_URL} + MG_UI_LOG_LEVEL: ${MG_UI_LOG_LEVEL} + MG_UI_PORT: ${MG_UI_PORT} + MG_HTTP_ADAPTER_URL: ${MG_HTTP_ADAPTER_URL} + MG_READER_URL: ${MG_READER_URL} + MG_THINGS_URL: ${MG_THINGS_URL} + MG_USERS_URL: ${MG_USERS_URL} + MG_BOOTSTRAP_URL: ${MG_BOOTSTRAP_URL} + MG_VERIFICATION_TLS: ${MG_VERIFICATION_TLS} + MG_UI_INSTANCE_ID: ${MG_UI_INSTANCE_ID} + MG_UI_HOST_URL: ${MG_UI_HOST_URL} diff --git a/go.mod b/go.mod index 654b726e..2763dfd6 100644 --- a/go.mod +++ b/go.mod @@ -1,53 +1,37 @@ -module github.com/ultravioletrs/mainflux-ui +module github.com/absmach/magistrala-ui -go 1.21 +go 1.21.0 require ( + github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3 github.com/caarlos0/env/v9 v9.0.0 
github.com/go-chi/chi/v5 v5.0.10 github.com/go-kit/kit v0.13.0 github.com/go-zoo/bone v1.3.0 github.com/golang-jwt/jwt v3.2.2+incompatible - github.com/mainflux/agent v0.11.1-0.20230724130550-0cd3f4c8c27c - github.com/mainflux/mainflux v0.0.0-20231021215047-ab832aff1b52 github.com/prometheus/client_golang v1.17.0 golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/sync v0.4.0 - google.golang.org/grpc v1.58.3 -) - -require ( - github.com/creack/pty v1.1.18 // indirect - github.com/edgexfoundry/go-mod-core-contracts v0.1.70 // indirect - github.com/google/uuid v1.3.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/klauspost/compress v1.17.0 // indirect - github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb // indirect - github.com/nats-io/nats.go v1.30.2 // indirect - github.com/nats-io/nkeys v0.4.5 // indirect - github.com/nats-io/nuid v1.0.1 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect + google.golang.org/grpc v1.59.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/eclipse/paho.mqtt.golang v1.4.3 github.com/fxamacker/cbor/v2 v2.5.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/mainflux/senml v1.5.0 - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mainflux/senml v1.5.0 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/common v0.45.0 // indirect 
github.com/prometheus/procfs v0.12.0 // indirect github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/protobuf v1.31.0 // indirect ) diff --git a/go.sum b/go.sum index 4980648d..e9cded9d 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3 h1:g5dSaPtjj9mNnz2cMJ076MRKSnrOcMjW8BsJ7Kbzd7s= +github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3/go.mod h1:ebPpg3UNO6ier1Ic2jBHkd8VUDD62707JRacj4UwGkM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/caarlos0/env/v7 v7.1.0 h1:9lzTF5amyQeWHZzuZeKlCb5FWSUxpG1js43mhbY8ozg= @@ -10,15 +12,11 @@ github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTx github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= -github.com/edgexfoundry/go-mod-core-contracts v0.1.70 h1:MYqxk52m5J37IlNRI0NxJGEtIx+1qdPp1pdYcMYIuug= -github.com/edgexfoundry/go-mod-core-contracts v0.1.70/go.mod h1:Bt+lYZeV02ndr/Jr6wnA3em2J+VTzZ1c0KVtqkNKdpg= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= @@ -30,41 +28,28 @@ github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.3.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-zoo/bone v1.3.0 h1:PY6sHq37FnQhj+4ZyqFIzJQHvrrGx0GEc3vTZZC/OsI= github.com/go-zoo/bone v1.3.0/go.mod h1:HI3Lhb7G3UQcAwEhOJ2WyNcsFtQX1WYHa0Hl4OBbhW8= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 
h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -75,8 +60,8 @@ github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZn github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= @@ -97,34 +82,22 @@ github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY= github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod 
h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/mainflux/agent v0.11.1-0.20230724130550-0cd3f4c8c27c h1:qDlZcB6dQjRk/vrTD3TOWoMIGQGA8BskCvuYp1yVMu8= -github.com/mainflux/agent v0.11.1-0.20230724130550-0cd3f4c8c27c/go.mod h1:P3Vcibeywrzbu4B72ZRX2AdZDUW1oJF2ll3qwr8hW+c= -github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb h1:+Zz1+/lX3bmt2AWyJHG2yVsJ7vqa0Q2XIYagG1dHj5c= -github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb/go.mod h1:speJE1lnq2emg5o4DqEoOe6nOgFRd0AEYSBBV2bCGKg= -github.com/mainflux/mainflux v0.0.0-20231021215047-ab832aff1b52 h1:FtOhk1e6z/r9moTL5Yai0SSlMGshto0jJiSePhG1NhA= -github.com/mainflux/mainflux v0.0.0-20231021215047-ab832aff1b52/go.mod h1:CUh9lZeBhrEvxmUoXb3flcdh8Zj3l/24SsKS5afUIN4= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/mainflux/mproxy v0.3.1-0.20231022160500-0e0db9e1642c h1:iF14azUs+lEzWPgVe6+lPqSjKGuN0d66cCFd2Cxt9Mo= +github.com/mainflux/mproxy v0.3.1-0.20231022160500-0e0db9e1642c/go.mod h1:NruAIEwk3udRzb8ZOrbA77Zo0eix3W8pTGOA8E6hvpg= github.com/mainflux/senml v1.5.0 h1:GAd1y1eMohfa6sVYcr2iQfVfkkh9l/q7B1TWF5L68xs= github.com/mainflux/senml v1.5.0/go.mod h1:SMX76mM5yenjLVjZOM27+njCGkP+AA64O46nRQiBRlE= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/minio/highwayhash v1.0.2 
h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I= -github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4= -github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= -github.com/nats-io/nats.go v1.30.2 h1:aloM0TGpPorZKQhbAkdCzYDj+ZmsJDyeo3Gkbr72NuY= -github.com/nats-io/nats.go v1.30.2/go.mod h1:dcfhUgmQNN4GJEfIb2f9R7Fow+gzBF4emzDHrVBd5qM= -github.com/nats-io/nkeys v0.4.5 h1:Zdz2BUlFm4fJlierwvGK+yl20IAKUm7eV6AAZXEhkPk= -github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= +github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E= +github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8= +github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY= +github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= 
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -136,8 +109,8 @@ github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1 github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo= @@ -149,7 +122,6 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -169,28 +141,24 @@ golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod 
h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= 
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/package-lock.json b/package-lock.json index 0d892de8..6dd88cd5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,52 +1,52 @@ { - "name": "mainflux-ui", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "devDependencies": { - "prettier": "^3.0.3", - "prettier-plugin-go-template": "^0.0.15" - } - }, - "node_modules/prettier": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz", - "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==", - "dev": true, - "bin": { - "prettier": "bin/prettier.cjs" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/prettier/prettier?sponsor=1" - } - }, - "node_modules/prettier-plugin-go-template": { - "version": "0.0.15", - "resolved": "https://registry.npmjs.org/prettier-plugin-go-template/-/prettier-plugin-go-template-0.0.15.tgz", - "integrity": "sha512-WqU92E1NokWYNZ9mLE6ijoRg6LtIGdLMePt2C7UBDjXeDH9okcRI3zRqtnWR4s5AloiqyvZ66jNBAa9tmRY5EQ==", - "dev": true, - "dependencies": { - "ulid": "^2.3.0" - }, - "engines": { 
- "node": ">=14.0.0" - }, - "peerDependencies": { - "prettier": "^3.0.0" - } - }, - "node_modules/ulid": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/ulid/-/ulid-2.3.0.tgz", - "integrity": "sha512-keqHubrlpvT6G2wH0OEfSW4mquYRcbe/J8NMmveoQOjUqmo+hXtO+ORCpWhdbZ7k72UtY61BL7haGxW6enBnjw==", - "dev": true, - "bin": { - "ulid": "bin/cli.js" - } - } - } + "name": "magistrala-ui", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "devDependencies": { + "prettier": "^3.0.3", + "prettier-plugin-go-template": "^0.0.15" + } + }, + "node_modules/prettier": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz", + "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==", + "dev": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-plugin-go-template": { + "version": "0.0.15", + "resolved": "https://registry.npmjs.org/prettier-plugin-go-template/-/prettier-plugin-go-template-0.0.15.tgz", + "integrity": "sha512-WqU92E1NokWYNZ9mLE6ijoRg6LtIGdLMePt2C7UBDjXeDH9okcRI3zRqtnWR4s5AloiqyvZ66jNBAa9tmRY5EQ==", + "dev": true, + "dependencies": { + "ulid": "^2.3.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "prettier": "^3.0.0" + } + }, + "node_modules/ulid": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/ulid/-/ulid-2.3.0.tgz", + "integrity": "sha512-keqHubrlpvT6G2wH0OEfSW4mquYRcbe/J8NMmveoQOjUqmo+hXtO+ORCpWhdbZ7k72UtY61BL7haGxW6enBnjw==", + "dev": true, + "bin": { + "ulid": "bin/cli.js" + } + } + } } diff --git a/ui/README.md b/ui/README.md index 0336b618..0a0941ab 100644 --- a/ui/README.md +++ b/ui/README.md @@ -1,6 +1,6 @@ # UI -UI provides an alternative method to interact with the mainflux system. 
+UI provides an alternative method to interact with the Magistrala system. ## Configuration @@ -10,13 +10,13 @@ default values. | Variable | Description | Default | | ------------------- | ------------------------------------- | --------------------- | -| MF_UI_LOG_LEVEL | Log level for UI | info | -| MF_UI_PORT | Port where UI service is run | 9090 | -| MF_HTTP_ADAPTER_URL | HTTP adapter URL | http://localhost:8008 | -| MF_READER_URL | Reader URL | http://localhost:9007 | -| MF_THINGS_URL | Things URL | http://localhost:9000 | -| MF_USERS_URL | Users URL | http://localhost:9002 | -| MF_VERIFICATION_TLS | Verification TLS flag | false | -| MF_BOOTSTRAP_URL | Bootstrap URL | http://localhost:9013 | -| MF_UI_INSTANCE_ID | Unique identifier for the UI instance | | -| MF_UI_HOST_URL | Base URL for the UI | http://localhost:9090 | +| MG_UI_LOG_LEVEL | Log level for UI | info | +| MG_UI_PORT | Port where UI service is run | 9095 | +| MG_HTTP_ADAPTER_URL | HTTP adapter URL | http://localhost:8008 | +| MG_READER_URL | Reader URL | http://localhost:9007 | +| MG_THINGS_URL | Things URL | http://localhost:9000 | +| MG_USERS_URL | Users URL | http://localhost:9002 | +| MG_VERIFICATION_TLS | Verification TLS flag | false | +| MG_BOOTSTRAP_URL | Bootstrap URL | http://localhost:9013 | +| MG_UI_INSTANCE_ID | Unique identifier for the UI instance | | +| MG_UI_HOST_URL | Base URL for the UI | http://localhost:9095 | diff --git a/ui/api/doc.go b/ui/api/doc.go index fb3127e4..2424852c 100644 --- a/ui/api/doc.go +++ b/ui/api/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 // Package api contains API-related concerns: endpoint definitions, middlewares diff --git a/ui/api/endpoint.go b/ui/api/endpoint.go index f1e308a9..66dc7367 100644 --- a/ui/api/endpoint.go +++ b/ui/api/endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 package 
api @@ -9,10 +9,10 @@ import ( "golang.org/x/sync/errgroup" - "github.com/ultravioletrs/mainflux-ui/ui" + "github.com/absmach/magistrala-ui/ui" + sdk "github.com/absmach/magistrala/pkg/sdk/go" "github.com/go-kit/kit/endpoint" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" ) func indexEndpoint(svc ui.Service) endpoint.Endpoint { diff --git a/ui/api/logging.go b/ui/api/logging.go index b298b41e..281a8d74 100644 --- a/ui/api/logging.go +++ b/ui/api/logging.go @@ -1,9 +1,6 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 -//go:build !test -// +build !test - package api import ( @@ -11,11 +8,11 @@ import ( "fmt" "time" - "github.com/ultravioletrs/mainflux-ui/ui" + "github.com/absmach/magistrala-ui/ui" - log "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/messaging" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" + log "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/messaging" + sdk "github.com/absmach/magistrala/pkg/sdk/go" ) var _ ui.Service = (*loggingMiddleware)(nil) diff --git a/ui/api/metrics.go b/ui/api/metrics.go index 81615e32..241f1daf 100644 --- a/ui/api/metrics.go +++ b/ui/api/metrics.go @@ -1,20 +1,17 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 -//go:build !test -// +build !test - package api import ( "context" "time" - "github.com/ultravioletrs/mainflux-ui/ui" + "github.com/absmach/magistrala-ui/ui" + "github.com/absmach/magistrala/pkg/messaging" + sdk "github.com/absmach/magistrala/pkg/sdk/go" "github.com/go-kit/kit/metrics" - "github.com/mainflux/mainflux/pkg/messaging" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" ) var _ ui.Service = (*metricsMiddleware)(nil) diff --git a/ui/api/requests.go b/ui/api/requests.go index fdb7ee2b..d1c4c572 100644 --- a/ui/api/requests.go +++ b/ui/api/requests.go @@ -1,11 +1,11 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // 
SPDX-License-Identifier: Apache-2.0 package api import ( - "github.com/mainflux/mainflux/pkg/messaging" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/absmach/magistrala/pkg/messaging" + sdk "github.com/absmach/magistrala/pkg/sdk/go" ) const maxNameSize = 1024 diff --git a/ui/api/responses.go b/ui/api/responses.go index 48605d6f..cd9cab80 100644 --- a/ui/api/responses.go +++ b/ui/api/responses.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 package api @@ -7,13 +7,13 @@ import ( "fmt" "net/http" - "github.com/mainflux/mainflux" + "github.com/absmach/magistrala" ) var ( - _ mainflux.Response = (*uiRes)(nil) - _ mainflux.Response = (*tokenRes)(nil) - _ mainflux.Response = (*terminalResponse)(nil) + _ magistrala.Response = (*uiRes)(nil) + _ magistrala.Response = (*tokenRes)(nil) + _ magistrala.Response = (*terminalResponse)(nil) ) type uiRes struct { diff --git a/ui/api/transport.go b/ui/api/transport.go index f5670e0e..ad65ad9f 100644 --- a/ui/api/transport.go +++ b/ui/api/transport.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 package api @@ -15,17 +15,17 @@ import ( "strings" "time" + "github.com/absmach/magistrala" + "github.com/absmach/magistrala-ui/ui" "github.com/go-zoo/bone" - "github.com/ultravioletrs/mainflux-ui/ui" "github.com/golang-jwt/jwt" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/messaging" + sdk "github.com/absmach/magistrala/pkg/sdk/go" "github.com/go-chi/chi/v5" kithttp "github.com/go-kit/kit/transport/http" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/messaging" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" "github.com/prometheus/client_golang/prometheus/promhttp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -710,7 +710,7 @@ func MakeHandler(svc ui.Service, r 
*chi.Mux, instanceID string) http.Handler { }) }) - r.Get("/health", mainflux.Health("ui", instanceID)) + r.Get("/health", magistrala.Health("ui", instanceID)) r.Handle("/metrics", promhttp.Handler()) r.NotFound(kithttp.NewServer( @@ -2354,7 +2354,7 @@ func encodeResponse(_ context.Context, w http.ResponseWriter, response interface } func encodeJSONResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - if ar, ok := response.(mainflux.Response); ok { + if ar, ok := response.(magistrala.Response); ok { for k, v := range ar.Headers() { w.Header().Set(k, v) } diff --git a/ui/service.go b/ui/service.go index f3147431..81b63d86 100644 --- a/ui/service.go +++ b/ui/service.go @@ -1,32 +1,26 @@ -// Copyright (c) Mainflux +// Copyright (c) Abstract Machines // SPDX-License-Identifier: Apache-2.0 // Package ui contains the domain concept definitions needed to support -// Mainflux ui adapter service functionality. +// Magistrala ui adapter service functionality. package ui import ( "bytes" "context" "encoding/json" - "fmt" "html/template" - "log" "math" "strings" - "sync" "time" "golang.org/x/exp/slices" - mqtt "github.com/eclipse/paho.mqtt.golang" - "github.com/mainflux/agent/pkg/bootstrap" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/messaging" - "github.com/mainflux/mainflux/pkg/transformers/senml" - mfsenml "github.com/mainflux/senml" + "github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/magistrala/pkg/transformers/senml" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" + sdk "github.com/absmach/magistrala/pkg/sdk/go" ) const ( @@ -1875,87 +1869,87 @@ func (us *uiService) GetRemoteTerminal(id, token string) ([]byte, error) { return btpl.Bytes(), nil } -func (us *uiService) ProcessTerminalCommand(ctx context.Context, id, tkn, command string, res chan string) error { - cfg, err := us.sdk.ViewBootstrap(id, tkn) - if err != nil { - return errors.Wrap(err, 
ErrFailedRetreive) - } - - var content bootstrap.ServicesConfig - - if err := json.Unmarshal([]byte(cfg.Content), &content); err != nil { - return err - } - - channels, ok := cfg.Channels.([]sdk.Channel) - if !ok { - return errors.New("invalid channels") - } - - pubTopic := fmt.Sprintf("channels/%s/messages/req", channels[0].ID) - subTopic := fmt.Sprintf("channels/%s/messages/res/#", channels[0].ID) - - opts := mqtt.NewClientOptions().SetCleanSession(true).SetAutoReconnect(true) - - opts.AddBroker(content.Agent.MQTT.URL) - if content.Agent.MQTT.Username == "" || content.Agent.MQTT.Password == "" { - opts.SetUsername(cfg.ThingID) - opts.SetPassword(cfg.ThingKey) - } else { - opts.SetUsername(content.Agent.MQTT.Username) - opts.SetPassword(content.Agent.MQTT.Password) - } - - opts.SetClientID(fmt.Sprintf("ui-terminal-%s", cfg.ThingID)) - client := mqtt.NewClient(opts) - - if token := client.Connect(); token.Wait() && token.Error() != nil { - return token.Error() - } - - req := []mfsenml.Record{ - {BaseName: "1", Name: "exec", StringValue: &command}, - } - reqByte, err1 := json.Marshal(req) - if err1 != nil { - return err1 - } - - token := client.Publish(pubTopic, 0, false, string(reqByte)) - token.Wait() - - if token.Error() != nil { - return token.Error() - } - - var wg sync.WaitGroup - wg.Add(1) - errChan := make(chan error) - - client.Subscribe(subTopic, 0, func(_ mqtt.Client, m mqtt.Message) { - var data []mfsenml.Record - if err := json.Unmarshal(m.Payload(), &data); err != nil { - errChan <- err - } - res <- *data[0].StringValue - wg.Done() - }) - - select { - case <-ctx.Done(): - log.Println("ProcessTerminalCommand canceled") - case <-time.After(time.Second * 5): - log.Println("Timeout occurred") - res <- "timeout" - case err := <-errChan: - return err - case <-res: - wg.Wait() - } - - client.Disconnect(250) - return nil -} +// func (us *uiService) ProcessTerminalCommand(ctx context.Context, id, tkn, command string, res chan string) error { +// cfg, err := 
us.sdk.ViewBootstrap(id, tkn) +// if err != nil { +// return errors.Wrap(err, ErrFailedRetreive) +// } + +// var content bootstrap.ServicesConfig + +// if err := json.Unmarshal([]byte(cfg.Content), &content); err != nil { +// return err +// } + +// channels, ok := cfg.Channels.([]sdk.Channel) +// if !ok { +// return errors.New("invalid channels") +// } + +// pubTopic := fmt.Sprintf("channels/%s/messages/req", channels[0].ID) +// subTopic := fmt.Sprintf("channels/%s/messages/res/#", channels[0].ID) + +// opts := mqtt.NewClientOptions().SetCleanSession(true).SetAutoReconnect(true) + +// opts.AddBroker(content.Agent.MQTT.URL) +// if content.Agent.MQTT.Username == "" || content.Agent.MQTT.Password == "" { +// opts.SetUsername(cfg.ThingID) +// opts.SetPassword(cfg.ThingKey) +// } else { +// opts.SetUsername(content.Agent.MQTT.Username) +// opts.SetPassword(content.Agent.MQTT.Password) +// } + +// opts.SetClientID(fmt.Sprintf("ui-terminal-%s", cfg.ThingID)) +// client := mqtt.NewClient(opts) + +// if token := client.Connect(); token.Wait() && token.Error() != nil { +// return token.Error() +// } + +// req := []mfsenml.Record{ +// {BaseName: "1", Name: "exec", StringValue: &command}, +// } +// reqByte, err1 := json.Marshal(req) +// if err1 != nil { +// return err1 +// } + +// token := client.Publish(pubTopic, 0, false, string(reqByte)) +// token.Wait() + +// if token.Error() != nil { +// return token.Error() +// } + +// var wg sync.WaitGroup +// wg.Add(1) +// errChan := make(chan error) + +// client.Subscribe(subTopic, 0, func(_ mqtt.Client, m mqtt.Message) { +// var data []mfsenml.Record +// if err := json.Unmarshal(m.Payload(), &data); err != nil { +// errChan <- err +// } +// res <- *data[0].StringValue +// wg.Done() +// }) + +// select { +// case <-ctx.Done(): +// log.Println("ProcessTerminalCommand canceled") +// case <-time.After(time.Second * 5): +// log.Println("Timeout occurred") +// res <- "timeout" +// case err := <-errChan: +// return err +// case <-res: +// 
wg.Wait() +// } + +// client.Disconnect(250) +// return nil +// } func (us *uiService) GetEntities(token, item, name string, page, limit uint64) ([]byte, error) { offset := (page - 1) * limit diff --git a/vendor/github.com/mainflux/mainflux/.dockerignore b/vendor/github.com/absmach/magistrala/.dockerignore similarity index 100% rename from vendor/github.com/mainflux/mainflux/.dockerignore rename to vendor/github.com/absmach/magistrala/.dockerignore diff --git a/vendor/github.com/mainflux/mainflux/.gitignore b/vendor/github.com/absmach/magistrala/.gitignore similarity index 78% rename from vendor/github.com/mainflux/mainflux/.gitignore rename to vendor/github.com/absmach/magistrala/.gitignore index b9488c63..fb4ee7e8 100644 --- a/vendor/github.com/mainflux/mainflux/.gitignore +++ b/vendor/github.com/absmach/magistrala/.gitignore @@ -1,4 +1,4 @@ -# Copyright (c) Mainflux +# Copyright (c) Abstract Machines # SPDX-License-Identifier: Apache-2.0 # Set your private global .gitignore: @@ -10,4 +10,4 @@ build tools/e2e/e2e tools/mqtt-bench/mqtt-bench tools/provision/provision -tools/provision/mfconn.toml +tools/provision/mgconn.toml diff --git a/vendor/github.com/mainflux/mainflux/.golangci.yml b/vendor/github.com/absmach/magistrala/.golangci.yml similarity index 97% rename from vendor/github.com/mainflux/mainflux/.golangci.yml rename to vendor/github.com/absmach/magistrala/.golangci.yml index 21782aa7..829193f6 100644 --- a/vendor/github.com/mainflux/mainflux/.golangci.yml +++ b/vendor/github.com/absmach/magistrala/.golangci.yml @@ -29,7 +29,7 @@ linters-settings: checks: ["-ST1000", "-ST1003", "-ST1020", "-ST1021", "-ST1022"] goheader: template: |- - Copyright (c) Mainflux + Copyright (c) Magistrala SPDX-License-Identifier: Apache-2.0 linters: diff --git a/vendor/github.com/mainflux/mainflux/ADOPTERS.md b/vendor/github.com/absmach/magistrala/ADOPTERS.md similarity index 65% rename from vendor/github.com/mainflux/mainflux/ADOPTERS.md rename to 
vendor/github.com/absmach/magistrala/ADOPTERS.md index 386209c2..d0479a15 100644 --- a/vendor/github.com/mainflux/mainflux/ADOPTERS.md +++ b/vendor/github.com/absmach/magistrala/ADOPTERS.md @@ -1,12 +1,12 @@ # Adopters -As Mainflux Community grows, we'd like to keep track of Mainflux adopters to grow the community, contact other users, share experiences and best practices. +As Magistrala Community grows, we'd like to keep track of Magistrala adopters to grow the community, contact other users, share experiences and best practices. -To accomplish this, we created a public ledger. The list of organizations and users who consider themselves as Mainflux adopters and that **publicly/officially** shared information and/or details of their adoption journey(optional). +To accomplish this, we created a public ledger. The list of organizations and users who consider themselves as Magistrala adopters and that **publicly/officially** shared information and/or details of their adoption journey(optional). Where users themselves directly maintain the list. ## Adding yourself as an adopter -If you are using Mainflux, please consider adding yourself as an adopter with a brief description of your use case by opening a pull request to this file and adding a section describing your adoption of Mainflux technology. +If you are using Magistrala, please consider adding yourself as an adopter with a brief description of your use case by opening a pull request to this file and adding a section describing your adoption of Magistrala technology. **Please send PRs to add or remove organizations/users** @@ -25,12 +25,12 @@ Pull request commit must be [signed](https://docs.github.com/en/github/authentic * There is no minimum requirement or adaptation size, but we request to list permanent deployments only, i.e., no demo or trial deployments. Commercial or production use is not required. A well-done home lab setup can be equally impressive as a large-scale commercial deployment. 
-**The list of organizations/users that have publicly shared the usage of Mainflux:** +**The list of organizations/users that have publicly shared the usage of Magistrala:** -**Note**: Several other organizations/users couldn't publicly share their usage details but are active project contributors and Mainflux Community members. +**Note**: Several other organizations/users couldn't publicly share their usage details but are active project contributors and Magistrala Community members. ## Adopters list (alphabetical) -**Note:** The list is maintained by the users themselves. If you find yourself on this list, and you think it's inappropriate. Please contact [project maintainers](https://github.com/mainflux/mainflux/blob/master/MAINTAINERS) and you will be permanently removed from the list. +**Note:** The list is maintained by the users themselves. If you find yourself on this list, and you think it's inappropriate. Please contact [project maintainers](https://github.com/absmach/magistrala/blob/master/MAINTAINERS) and you will be permanently removed from the list. 
diff --git a/vendor/github.com/mainflux/mainflux/CHANGELOG.md b/vendor/github.com/absmach/magistrala/CHANGELOG.md similarity index 97% rename from vendor/github.com/mainflux/mainflux/CHANGELOG.md rename to vendor/github.com/absmach/magistrala/CHANGELOG.md index 082ccd31..edd5e6c5 100644 --- a/vendor/github.com/mainflux/mainflux/CHANGELOG.md +++ b/vendor/github.com/absmach/magistrala/CHANGELOG.md @@ -1,7 +1,7 @@ -# Mainflux Changelog +# Magistrala Changelog ## Generation -Mainflux release notes for the latest release can be obtained via: +Magistrala release notes for the latest release can be obtained via: ``` make changelog ``` @@ -18,7 +18,7 @@ git log --pretty=oneline --abbrev-commit - NOISSUE - Update changelog and readme for release 0.13.0 - MF-1582 - Fix lora-adapter MQTT client (#1583) - NOISSUE - Fix CoAP adapter (#1572) -- NOISSUE - Unify MF_INFLUX_READER_DB_HOST and MF_INFLUX_WRITER_DB_HOST envars (#1585) +- NOISSUE - Unify MG_INFLUX_READER_DB_HOST and MG_INFLUX_WRITER_DB_HOST envars (#1585) - MF-1580 - Influxdb Writer changes format of update-time to string (#1581) - MF-1575 Add 'Name' field to ListMembers response in things svc (#1576) - MF-1565 - Document Bearer, Thing and Basic Authorization header (#1566) @@ -199,18 +199,18 @@ git log --pretty=oneline --abbrev-commit - NOISSUE - Add subtopic wildcard for twin attribute's definition (#1214) - fix envs for nginx (#1215) - Remove twin mqtt related obsolete var and fix es-redis address (#1213) -- NOISSUE - Remove unused `MF_THINGS_SECRET` env var (#1211) +- NOISSUE - Remove unused `MG_THINGS_SECRET` env var (#1211) - NOISSUE - Fix some typos (#1212) - NOISSUE - Remove unknown Bootstrap requests (#1210) - NOISSUE - Use `pgcrypto` instead `uuid-ossp` for UUIDs generation (version 4) (#1208) - MF-1198 - Add errors package tests (#1207) - MF-1025 - timeout env in sec, use parseduration (#1206) -- MF-1201 - Fix MF_THINGS_AUTH_GRPC_URL mongo reader ENVAR (#1203) +- MF-1201 - Fix MG_THINGS_AUTH_GRPC_URL mongo 
reader ENVAR (#1203) - NOISSUE - Fix CI (#1204) - MF-1180 - Add redis based twins and states cache (#1184) - MF-739 - Add ID to the User entity (#1152) - NOISSUE - Fix default db name for storage databases (#1194) -- NOISSUE - Add `MF_DOCKER_IMAGE_NAME_PREFIX` to Makefile (#1173) +- NOISSUE - Add `MG_DOCKER_IMAGE_NAME_PREFIX` to Makefile (#1173) - MF-1154 - Move UUID provider to project root (#1172) - Fix typo in error messages (#1193) - MF-1190 - Add pkg for library packages (#1191) @@ -224,7 +224,7 @@ git log --pretty=oneline --abbrev-commit - MF-995 - Add Twins tests for endpoint list twins and list states (#1174) - NOISSUE - Update dependencies (#1176) - MF-1163 - Fix influxdb-reader to use nanoseconds precision (#1171) -- Rename environment variable MF_MQTT_ADAPTER_PORT to MF_MQTT_ADAPTER_MQTT_PORT in docker environment (#1170) +- Rename environment variable MG_MQTT_ADAPTER_PORT to MG_MQTT_ADAPTER_MQTT_PORT in docker environment (#1170) - Remove thing related code from twins service (#1169) - MF-997 - Add twins service swagger file (#1167) - MF-1079 - Add MQTT forwarder (#1164) @@ -341,7 +341,7 @@ git log --pretty=oneline --abbrev-commit - MF-898 - Add transactions to postgres connect (#940) - Add missing user service tests (#945) - Remove Normalizer service from compose (#937) -- MF-919 - Mainflux message updates (#924) +- MF-919 - Magistrala message updates (#924) - NOISSUE - Remove ARM multi-arch images (#929) - MF-906 - Change single creation endpoints to use bulk service calls (#927) - MF-922 - Add UpdateUser endpoint (#923) @@ -493,7 +493,7 @@ git log --pretty=oneline --abbrev-commit - Fix CI with fixed plugin versions (#747) - fix building problems (#741) - fix docker-compose env (#775) -- Fix MF_THINGS_AUTH_GRPC_PORT in addons' docker-compose files (#781) +- Fix MG_THINGS_AUTH_GRPC_PORT in addons' docker-compose files (#781) - Fix MQTT raw message deserialization (#753) - fix variant option for manifest annotate (#765) - fix to makefile for OSX/Darwin 
(#724) @@ -510,7 +510,7 @@ git log --pretty=oneline --abbrev-commit ### Summary -https://github.com/mainflux/mainflux/milestone/10?closed=1 +https://github.com/absmach/magistrala/milestone/10?closed=1 ## 0.8.0 - 20. MAR 2019. ### Features @@ -576,7 +576,7 @@ https://github.com/mainflux/mainflux/milestone/10?closed=1 - NOISSUE - Fix normalizer exposed port in docker-compose (#548) ### Summary -https://github.com/mainflux/mainflux/milestone/9?closed=1 +https://github.com/absmach/magistrala/milestone/9?closed=1 ## 0.7.0 - 08. DEC 2018. @@ -601,10 +601,10 @@ https://github.com/mainflux/mainflux/milestone/9?closed=1 - MF-454 - Use message Time field as a time for InfluxDB points (#455) - NOISSUE - Add .dockerignore to project root (#457) - Update docker-compose so that every service has debug log level (#453) -- NOISSUE - Add TLS flag for Mainflux services (#452) +- NOISSUE - Add TLS flag for Magistrala services (#452) - MF-448 - Option for Postgres SSL Mode (#449) - MF-443 Update project dependencies (#444) -- MF-426 - Add optional MF_CA_CERTS env variable to allow GRPC client to use TLS certs (#430) +- MF-426 - Add optional MG_CA_CERTS env variable to allow GRPC client to use TLS certs (#430) - Expose the InfluxDB and Cassandra ports to host (#441) - MF-374 - Bring back CoAP adapter (#413) @@ -613,7 +613,7 @@ https://github.com/mainflux/mainflux/milestone/9?closed=1 - MF-407 - Values of zero are being omitted (#434) ### Summary -https://github.com/mainflux/mainflux/milestone/8?closed=1 +https://github.com/absmach/magistrala/milestone/8?closed=1 ## 0.6.0 - 26. OCT 2018. @@ -653,7 +653,7 @@ https://github.com/mainflux/mainflux/milestone/8?closed=1 - Vendor correct gRPC version (#340) ### Summary -https://github.com/mainflux/mainflux/milestone/6?closed=1 +https://github.com/absmach/magistrala/milestone/6?closed=1 ## 0.4.0 - 01. JUN 2018. 
diff --git a/vendor/github.com/mainflux/mainflux/CONTRIBUTING.md b/vendor/github.com/absmach/magistrala/CONTRIBUTING.md similarity index 81% rename from vendor/github.com/mainflux/mainflux/CONTRIBUTING.md rename to vendor/github.com/absmach/magistrala/CONTRIBUTING.md index 4649acc3..35a196aa 100644 --- a/vendor/github.com/mainflux/mainflux/CONTRIBUTING.md +++ b/vendor/github.com/absmach/magistrala/CONTRIBUTING.md @@ -1,11 +1,11 @@ -# Contributing to Mainflux +# Contributing to Magistrala -The following is a set of guidelines to contribute to Mainflux and its libraries, which are -hosted on the [Mainflux Organization](https://github.com/mainflux) on GitHub. +The following is a set of guidelines to contribute to Magistrala and its libraries, which are +hosted on the [Abstract Machines Organization](https://github.com/absmach) on GitHub. This project adheres to the [Contributor Covenant 1.2](http://contributor-covenant.org/version/1/2/0). By participating, you are expected to uphold this code. Please report unacceptable behavior to -[abuse@mainflux.com](mailto:abuse@mainflux.com). +[abuse@magistrala.com](mailto:abuse@magistrala.com). ## Reporting issues @@ -13,7 +13,7 @@ Reporting issues are a great way to contribute to the project. We are perpetuall thorough bug report. Before raising a new issue, check [our issue -list](https://github.com/mainflux/mainflux/issues) to determine if it already contains the +list](https://github.com/absmach/magistrala/issues) to determine if it already contains the problem that you are facing. A good bug report shouldn't leave others needing to chase you for more information. Please be as detailed as possible. 
The following questions might serve as a template for writing a detailed @@ -41,23 +41,23 @@ To contribute to the project, [fork](https://help.github.com/articles/fork-a-rep clone your fork repository, and configure the remotes: ``` -git clone https://github.com//mainflux.git -cd mainflux -git remote add upstream https://github.com/mainflux/mainflux.git +git clone https://github.com//magistrala.git +cd magistrala +git remote add upstream https://github.com/absmach/magistrala.git ``` If your cloned repository is behind the upstream commits, then get the latest changes from upstream: ``` git checkout master -git pull --rebase upstream master +git pull --rebase upstream main ``` -Create a new topic branch from `master` using the naming convention `MF-[issue-number]` +Create a new topic branch from `master` using the naming convention `MG-[issue-number]` to help us keep track of your contribution scope: ``` -git checkout -b MF-[issue-number] +git checkout -b MG-[issue-number] ``` Commit your changes in logical chunks. 
When you are ready to commit, make sure @@ -74,13 +74,13 @@ and `user.email` git configs, you can sign your commit automatically with `git c Locally merge (or rebase) the upstream development branch into your topic branch: ``` -git pull --rebase upstream master +git pull --rebase upstream main ``` Push your topic branch up to your fork: ``` -git push origin MF-[issue-number] +git push origin MG-[issue-number] ``` [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) with a clear title diff --git a/vendor/github.com/mainflux/agent/LICENSE b/vendor/github.com/absmach/magistrala/LICENSE similarity index 99% rename from vendor/github.com/mainflux/agent/LICENSE rename to vendor/github.com/absmach/magistrala/LICENSE index 478338e5..0cb81525 100644 --- a/vendor/github.com/mainflux/agent/LICENSE +++ b/vendor/github.com/absmach/magistrala/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2015-2019 Mainflux + Copyright 2015-2020 Magistrala Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/mainflux/mainflux/MAINTAINERS b/vendor/github.com/absmach/magistrala/MAINTAINERS similarity index 55% rename from vendor/github.com/mainflux/mainflux/MAINTAINERS rename to vendor/github.com/absmach/magistrala/MAINTAINERS index a5402ac0..8df02cf4 100644 --- a/vendor/github.com/mainflux/mainflux/MAINTAINERS +++ b/vendor/github.com/absmach/magistrala/MAINTAINERS @@ -1,4 +1,4 @@ -# Mainflux follows the timeless, highly efficient and totally unfair system +# Magistrala follows the timeless, highly efficient and totally unfair system # known as [Benevolent dictator for # life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with # Drasko DRASKOVIC in the role of BDFL. 
@@ -6,8 +6,8 @@ [bdfl] [[drasko]] - Name = "Drasko DRASKOVIC" - Email = "drasko@mainflux.com" + Name = "Drasko Draskovic" + Email = "draasko.draskovic@abstractmachines.fr" GitHub = "drasko" # However, this role serves only in dead-lock events, or in a special and very rare cases @@ -18,29 +18,13 @@ # Maintainers have the special role in the project in managing and accepting PRs, # overall leading the project and making design decisions on the maintained subsystems. # -# A reference list of all maintainers of the Mainflux project. +# A reference list of all maintainers of the Magistrala project. # ADD YOURSELF HERE IN ALPHABETICAL ORDER [maintainers] - - [[aleksandar]] - Name = "Aleksandar NOVAKOVIC" - Email = "aleksandar@mainflux.com" - GitHub = "anovakovic01" [[dusan]] - Name = "Dusan BOROVCANIN" - Email = "dusan@mainflux.com" - GitHub = "dusanb94" - - [[manuel]] - Name = "Manuel IMPERIALE" - Email = "manuel@mainflux.com" - GitHub = "manuIO" - - [[nikola]] - Name = "Nikola MARCETIC" - Email = "nikola@mainflux.com" - GitHub = "nmarcetic" - + Name = "Dusan Borovcanin" + Email = "dusan.borovcanin@abstractmachines.fr" + GitHub = "dborovcanin" diff --git a/vendor/github.com/mainflux/mainflux/Makefile b/vendor/github.com/absmach/magistrala/Makefile similarity index 71% rename from vendor/github.com/mainflux/mainflux/Makefile rename to vendor/github.com/absmach/magistrala/Makefile index 17ecb171..9400256a 100644 --- a/vendor/github.com/mainflux/mainflux/Makefile +++ b/vendor/github.com/absmach/magistrala/Makefile @@ -1,7 +1,7 @@ -# Copyright (c) Mainflux +# Copyright (c) Abstract Machines # SPDX-License-Identifier: Apache-2.0 -MF_DOCKER_IMAGE_NAME_PREFIX ?= mainflux +MG_DOCKER_IMAGE_NAME_PREFIX ?= magistrala BUILD_DIR = build SERVICES = auth users things http coap ws lora influxdb-writer influxdb-reader mongodb-writer \ mongodb-reader cassandra-writer cassandra-reader postgres-writer postgres-reader timescale-writer timescale-reader cli \ @@ -22,27 +22,32 @@ 
DOCKER_PROJECT ?= $(shell echo $(subst $(space),,$(USER_REPO)_$(BRANCH)) | tr -c DOCKER_COMPOSE_COMMANDS_SUPPORTED := up down config DEFAULT_DOCKER_COMPOSE_COMMAND := up GRPC_MTLS_CERT_FILES_EXISTS = 0 -DOCKER_PROFILE ?= $(MF_MQTT_BROKER_TYPE)_$(MF_MESSAGE_BROKER_TYPE) -ifneq ($(MF_MESSAGE_BROKER_TYPE),) - MF_MESSAGE_BROKER_TYPE := $(MF_MESSAGE_BROKER_TYPE) +DOCKER_PROFILE ?= $(MG_MQTT_BROKER_TYPE)_$(MG_MESSAGE_BROKER_TYPE) +ifneq ($(MG_MESSAGE_BROKER_TYPE),) + MG_MESSAGE_BROKER_TYPE := $(MG_MESSAGE_BROKER_TYPE) else - MF_MESSAGE_BROKER_TYPE=nats + MG_MESSAGE_BROKER_TYPE=nats endif -ifneq ($(MF_MQTT_BROKER_TYPE),) - MF_MQTT_BROKER_TYPE := $(MF_MQTT_BROKER_TYPE) +ifneq ($(MG_MQTT_BROKER_TYPE),) + MG_MQTT_BROKER_TYPE := $(MG_MQTT_BROKER_TYPE) else - MF_MQTT_BROKER_TYPE=nats + MG_MQTT_BROKER_TYPE=nats endif +ifneq ($(MG_ES_STORE_TYPE),) + MG_ES_STORE_TYPE := $(MG_ES_STORE_TYPE) +else + MG_ES_STORE_TYPE=nats +endif define compile_service CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) \ - go build -mod=vendor -tags $(MF_MESSAGE_BROKER_TYPE) -ldflags "-s -w \ - -X 'github.com/mainflux/mainflux.BuildTime=$(TIME)' \ - -X 'github.com/mainflux/mainflux.Version=$(VERSION)' \ - -X 'github.com/mainflux/mainflux.Commit=$(COMMIT)'" \ - -o ${BUILD_DIR}/mainflux-$(1) cmd/$(1)/main.go + go build -tags $(MG_MESSAGE_BROKER_TYPE) --tags $(MG_ES_STORE_TYPE) -ldflags "-s -w \ + -X 'github.com/absmach/magistrala.BuildTime=$(TIME)' \ + -X 'github.com/absmach/magistrala.Version=$(VERSION)' \ + -X 'github.com/absmach/magistrala.Commit=$(COMMIT)'" \ + -o ${BUILD_DIR}/$(1) cmd/$(1)/main.go endef define make_docker @@ -56,7 +61,7 @@ define make_docker --build-arg VERSION=$(VERSION) \ --build-arg COMMIT=$(COMMIT) \ --build-arg TIME=$(TIME) \ - --tag=$(MF_DOCKER_IMAGE_NAME_PREFIX)/$(svc) \ + --tag=$(MG_DOCKER_IMAGE_NAME_PREFIX)/$(svc) \ -f docker/Dockerfile . 
endef @@ -66,7 +71,7 @@ define make_docker_dev docker build \ --no-cache \ --build-arg SVC=$(svc) \ - --tag=$(MF_DOCKER_IMAGE_NAME_PREFIX)/$(svc) \ + --tag=$(MG_DOCKER_IMAGE_NAME_PREFIX)/$(svc) \ -f docker/Dockerfile.dev ./build endef @@ -106,11 +111,11 @@ clean: cleandocker: # Stops containers and removes containers, networks, volumes, and images created by up - docker-compose -f docker/docker-compose.yml down --rmi all -v --remove-orphans + docker-compose -f docker/docker-compose.yml --profile $(DOCKER_PROFILE) -p $(DOCKER_PROJECT) down --rmi all -v --remove-orphans ifdef pv # Remove unused volumes - docker volume ls -f name=$(MF_DOCKER_IMAGE_NAME_PREFIX) -f dangling=true -q | xargs -r docker volume rm + docker volume ls -f name=$(MG_DOCKER_IMAGE_NAME_PREFIX) -f dangling=true -q | xargs -r docker volume rm endif install: @@ -137,7 +142,7 @@ dockers_dev: $(DOCKERS_DEV) define docker_push for svc in $(SERVICES); do \ - docker push $(MF_DOCKER_IMAGE_NAME_PREFIX)/$$svc:$(1); \ + docker push $(MG_DOCKER_IMAGE_NAME_PREFIX)/$$svc:$(1); \ done endef @@ -152,7 +157,7 @@ release: git checkout $(version) $(MAKE) dockers for svc in $(SERVICES); do \ - docker tag $(MF_DOCKER_IMAGE_NAME_PREFIX)/$$svc $(MF_DOCKER_IMAGE_NAME_PREFIX)/$$svc:$(version); \ + docker tag $(MG_DOCKER_IMAGE_NAME_PREFIX)/$$svc $(MG_DOCKER_IMAGE_NAME_PREFIX)/$$svc:$(version); \ done $(call docker_push,$(version)) @@ -192,30 +197,30 @@ endif endif define edit_docker_config - sed -i "s/MF_MQTT_BROKER_TYPE=.*/MF_MQTT_BROKER_TYPE=$(1)/" docker/.env - sed -i "s/MF_MQTT_BROKER_HEALTH_CHECK=.*/MF_MQTT_BROKER_HEALTH_CHECK=$$\{MF_$(shell echo ${MF_MQTT_BROKER_TYPE} | tr 'a-z' 'A-Z')_HEALTH_CHECK}/" docker/.env - sed -i "s/MF_MQTT_ADAPTER_WS_TARGET_PATH=.*/MF_MQTT_ADAPTER_WS_TARGET_PATH=$$\{MF_$(shell echo ${MF_MQTT_BROKER_TYPE} | tr 'a-z' 'A-Z')_WS_TARGET_PATH}/" docker/.env - sed -i "s/MF_MESSAGE_BROKER_TYPE=.*/MF_MESSAGE_BROKER_TYPE=$(2)/" docker/.env + sed -i "s/MG_MQTT_BROKER_TYPE=.*/MG_MQTT_BROKER_TYPE=$(1)/" 
docker/.env + sed -i "s/MG_MQTT_BROKER_HEALTH_CHECK=.*/MG_MQTT_BROKER_HEALTH_CHECK=$$\{MG_$(shell echo ${MG_MQTT_BROKER_TYPE} | tr 'a-z' 'A-Z')_HEALTH_CHECK}/" docker/.env + sed -i "s/MG_MQTT_ADAPTER_WS_TARGET_PATH=.*/MG_MQTT_ADAPTER_WS_TARGET_PATH=$$\{MG_$(shell echo ${MG_MQTT_BROKER_TYPE} | tr 'a-z' 'A-Z')_WS_TARGET_PATH}/" docker/.env + sed -i "s/MG_MESSAGE_BROKER_TYPE=.*/MG_MESSAGE_BROKER_TYPE=$(2)/" docker/.env sed -i "s,file: .*.yml,file: $(2).yml," docker/brokers/docker-compose.yml - sed -i "s,MF_MESSAGE_BROKER_URL=.*,MF_MESSAGE_BROKER_URL=$$\{MF_$(shell echo ${MF_MESSAGE_BROKER_TYPE} | tr 'a-z' 'A-Z')_URL\}," docker/.env - sed -i "s,MF_MQTT_ADAPTER_MQTT_QOS=.*,MF_MQTT_ADAPTER_MQTT_QOS=$$\{MF_$(shell echo ${MF_MQTT_BROKER_TYPE} | tr 'a-z' 'A-Z')_MQTT_QOS\}," docker/.env + sed -i "s,MG_MESSAGE_BROKER_URL=.*,MG_MESSAGE_BROKER_URL=$$\{MG_$(shell echo ${MG_MESSAGE_BROKER_TYPE} | tr 'a-z' 'A-Z')_URL\}," docker/.env + sed -i "s,MG_MQTT_ADAPTER_MQTT_QOS=.*,MG_MQTT_ADAPTER_MQTT_QOS=$$\{MG_$(shell echo ${MG_MQTT_BROKER_TYPE} | tr 'a-z' 'A-Z')_MQTT_QOS\}," docker/.env endef change_config: ifeq ($(DOCKER_PROFILE),nats_nats) sed -i "s/- broker/- nats/g" docker/docker-compose.yml sed -i "s/- rabbitmq/- nats/g" docker/docker-compose.yml - sed -i "s,MF_NATS_URL=.*,MF_NATS_URL=nats://nats:$$\{MF_NATS_PORT}," docker/.env + sed -i "s,MG_NATS_URL=.*,MG_NATS_URL=nats://nats:$$\{MG_NATS_PORT}," docker/.env $(call edit_docker_config,nats,nats) else ifeq ($(DOCKER_PROFILE),nats_rabbitmq) sed -i "s/nats/broker/g" docker/docker-compose.yml - sed -i "s,MF_NATS_URL=.*,MF_NATS_URL=nats://nats:$$\{MF_NATS_PORT}," docker/.env + sed -i "s,MG_NATS_URL=.*,MG_NATS_URL=nats://nats:$$\{MG_NATS_PORT}," docker/.env sed -i "s/rabbitmq/broker/g" docker/docker-compose.yml $(call edit_docker_config,nats,rabbitmq) else ifeq ($(DOCKER_PROFILE),vernemq_nats) sed -i "s/nats/broker/g" docker/docker-compose.yml sed -i "s/rabbitmq/broker/g" docker/docker-compose.yml - sed -i 
"s,MF_NATS_URL=.*,MF_NATS_URL=nats://broker:$$\{MF_NATS_PORT}," docker/.env + sed -i "s,MG_NATS_URL=.*,MG_NATS_URL=nats://broker:$$\{MG_NATS_PORT}," docker/.env $(call edit_docker_config,vernemq,nats) else ifeq ($(DOCKER_PROFILE),vernemq_rabbitmq) sed -i "s/nats/broker/g" docker/docker-compose.yml @@ -226,11 +231,19 @@ else endif run: check_certs change_config +ifeq ($(MG_ES_STORE_TYPE), redis) + sed -i "s/MG_ES_STORE_TYPE=.*/MG_ES_STORE_TYPE=redis/" docker/.env + sed -i "s/MG_ES_STORE_URL=.*/MG_ES_STORE_URL=$$\{MG_REDIS_URL}/" docker/.env + docker-compose -f docker/docker-compose.yml --profile $(DOCKER_PROFILE) --profile redis -p $(DOCKER_PROJECT) $(DOCKER_COMPOSE_COMMAND) $(args) +else + sed -i "s,MG_ES_STORE_TYPE=.*,MG_ES_STORE_TYPE=$$\{MG_MESSAGE_BROKER_TYPE}," docker/.env + sed -i "s,MG_ES_STORE_URL=.*,MG_ES_STORE_URL=$$\{MG_$(shell echo ${MG_MESSAGE_BROKER_TYPE} | tr 'a-z' 'A-Z')_URL\}," docker/.env docker-compose -f docker/docker-compose.yml --profile $(DOCKER_PROFILE) -p $(DOCKER_PROJECT) $(DOCKER_COMPOSE_COMMAND) $(args) +endif run_addons: check_certs $(call change_config) $(foreach SVC,$(RUN_ADDON_ARGS),$(if $(filter $(SVC),$(ADDON_SERVICES) $(EXTERNAL_SERVICES)),,$(error Invalid Service $(SVC)))) @for SVC in $(RUN_ADDON_ARGS); do \ - MF_ADDONS_CERTS_PATH_PREFIX="../." docker-compose -f docker/addons/$$SVC/docker-compose.yml -p $(DOCKER_PROJECT) --env-file ./docker/.env $(DOCKER_COMPOSE_COMMAND) $(args) & \ + MG_ADDONS_CERTS_PATH_PREFIX="../." 
docker-compose -f docker/addons/$$SVC/docker-compose.yml -p $(DOCKER_PROJECT) --env-file ./docker/.env $(DOCKER_COMPOSE_COMMAND) $(args) & \ done diff --git a/vendor/github.com/mainflux/mainflux/README.md b/vendor/github.com/absmach/magistrala/README.md similarity index 66% rename from vendor/github.com/mainflux/mainflux/README.md rename to vendor/github.com/absmach/magistrala/README.md index 12d4c382..c80c320a 100644 --- a/vendor/github.com/mainflux/mainflux/README.md +++ b/vendor/github.com/absmach/magistrala/README.md @@ -1,4 +1,4 @@ -# Mainflux +# Magistrala [![Build Status](https://mainflux.semaphoreci.com/badges/mainflux/branches/master.svg?style=shields)](https://mainflux.semaphoreci.com/projects/mainflux) [![go report card][grc-badge]][grc-url] @@ -8,7 +8,7 @@ ![banner][banner] -Mainflux is modern, scalable, secure, open-source, and patent-free IoT cloud platform written in Go. +Magistrala is modern, scalable, secure, open-source, and patent-free IoT cloud platform written in Go. It accepts user and thing (sensor, actuator, application) connections over various network protocols (i.e. HTTP, MQTT, WebSocket, CoAP), thus making a seamless bridge between them. It is used as the IoT middleware @@ -36,12 +36,12 @@ For more details, check out the [official documentation][docs]. 
## Prerequisites -The following are needed to run Mainflux: +The following are needed to run Magistrala: - [Docker](https://docs.docker.com/install/) (version 20.10) - [Docker compose](https://docs.docker.com/compose/install/) (version 1.29) -Developing Mainflux will also require: +Developing Magistrala will also require: - [Go](https://golang.org/doc/install) (version 1.19.2) - [Protobuf](https://github.com/protocolbuffers/protobuf#protocol-compiler-installation) (version 3.6.1) @@ -54,14 +54,14 @@ Once the prerequisites are installed, execute the following commands from the pr docker-compose -f docker/docker-compose.yml up ``` -This will bring up the Mainflux docker services and interconnect them. This command can also be executed using the project's included Makefile: +This will bring up the Magistrala docker services and interconnect them. This command can also be executed using the project's included Makefile: ```bash make run ``` If you want to run services from specific release checkout code from github and make sure that -`MF_RELEASE_TAG` in [.env](.env) is being set to match the release version +`MG_RELEASE_TAG` in [.env](.env) is being set to match the release version ```bash git checkout tags/ -b @@ -71,65 +71,65 @@ git checkout tags/ -b Check that `.env` file contains: ```bash -MF_RELEASE_TAG= +MG_RELEASE_TAG= ``` >`docker-compose` should be used for development and testing deployments. For production we suggest using [Kubernetes](https://docs.mainflux.io/kubernetes). ## Usage -The quickest way to start using Mainflux is via the CLI. The latest version can be downloaded from the [official releases page][rel]. +The quickest way to start using Magistrala is via the CLI. The latest version can be downloaded from the [official releases page][rel]. 
It can also be built and used from the project's root directory: ```bash make cli -./build/mainflux-cli version +./build/cli version ``` Additional details on using the CLI can be found in the [CLI documentation](https://docs.mainflux.io/cli). ## Documentation -Official documentation is hosted at [Mainflux official docs page][docs]. Documentation is auto-generated, checkout the instructions on [official docs repository](https://github.com/mainflux/docs): +Official documentation is hosted at [Magistrala official docs page][docs]. Documentation is auto-generated, checkout the instructions on [official docs repository](https://github.com/mainflux/docs): If you spot an error or a need for corrections, please let us know - or even better: send us a PR. ## Authors -Main architect and BDFL of Mainflux project is [@drasko][drasko]. +Main architect and BDFL of Magistrala project is [@drasko][drasko]. Additionally, [@nmarcetic][nikola] and [@janko-isidorovic][janko] assured overall architecture and design, while [@manuio][manu] and [@darkodraskovic][darko] helped with crafting initial implementation and continuously worked on the project evolutions. -Besides them, Mainflux is constantly improved and actively +Besides them, Magistrala is constantly improved and actively developed by [@anovakovic01][alex], [@dusanb94][dusan], [@srados][sava], [@gsaleh][george], [@blokovi][iva], [@chombium][kole], [@mteodor][mirko] and a large set of contributors. Maintainers are listed in [MAINTAINERS](MAINTAINERS) file. -The Mainflux team would like to give special thanks to [@mijicd][dejan] for his monumental work +The Magistrala team would like to give special thanks to [@mijicd][dejan] for his monumental work on designing and implementing a highly improved and optimized version of the platform, and [@malidukica][dusanm] for his effort on implementing the initial user interface. ## Professional Support -There are many companies offering professional support for the Mainflux system. 
+There are many companies offering professional support for the Magistrala system. If you need this kind of support, best is to reach out to [@drasko][drasko] directly, and he will point you out to the best-matching support team. ## Contributing -Thank you for your interest in Mainflux and the desire to contribute! +Thank you for your interest in Magistrala and the desire to contribute! -1. Take a look at our [open issues](https://github.com/mainflux/mainflux/issues). The [good-first-issue](https://github.com/mainflux/mainflux/labels/good-first-issue) label is specifically for issues that are great for getting started. +1. Take a look at our [open issues](https://github.com/absmach/magistrala/issues). The [good-first-issue](https://github.com/absmach/magistrala/labels/good-first-issue) label is specifically for issues that are great for getting started. 2. Checkout the [contribution guide](CONTRIBUTING.md) to learn more about our style and conventions. 3. Make your changes compatible to our workflow. ### We're Hiring -You like Mainflux and you would like to make it your day job? We're always looking for talented engineers interested in open-source, IoT and distributed systems. If you recognize yourself, reach out to [@drasko][drasko] - he will contact you back. +You like Magistrala and you would like to make it your day job? We're always looking for talented engineers interested in open-source, IoT and distributed systems. If you recognize yourself, reach out to [@drasko][drasko] - he will contact you back. >The best way to grab our attention is, of course, by sending PRs :sunglasses:. @@ -145,34 +145,34 @@ You like Mainflux and you would like to make it your day job? 
We're always looki [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fmainflux%2Fmainflux.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fmainflux%2Fmainflux?ref=badge_large) -## Data Collection for Mainflux -Mainflux is committed to continuously improving its services and ensuring a seamless experience for its users. To achieve this, we collect certain data from your deployments. Rest assured, this data is collected solely for the purpose of enhancing Mainflux and is not used with any malicious intent. The deployment summary can be found on our [website][callhome]. +## Data Collection for Magistrala +Magistrala is committed to continuously improving its services and ensuring a seamless experience for its users. To achieve this, we collect certain data from your deployments. Rest assured, this data is collected solely for the purpose of enhancing Magistrala and is not used with any malicious intent. The deployment summary can be found on our [website][callhome]. The collected data includes: - **IP Address** - Used for approximate location information on deployments. - **Services Used** - To understand which features are popular and prioritize future developments. -- **Last Seen Time** - To ensure the stability and availability of Mainflux. -- **Mainflux Version** - To track the software version and deliver relevant updates. +- **Last Seen Time** - To ensure the stability and availability of Magistrala. +- **Magistrala Version** - To track the software version and deliver relevant updates. We take your privacy and data security seriously. All data collected is handled in accordance with our stringent privacy policies and industry best practices. Data collection is on by default and can be disabled by setting the env variable: -`MF_SEND_TELEMETRY=false` +`MG_SEND_TELEMETRY=false` -By utilizing Mainflux, you actively contribute to its improvement. Together, we can build a more robust and efficient IoT platform. 
Thank you for your trust in Mainflux! +By utilizing Magistrala, you actively contribute to its improvement. Together, we can build a more robust and efficient IoT platform. Thank you for your trust in Magistrala! [banner]: https://github.com/mainflux/docs/blob/master/docs/img/gopherBanner.jpg -[ci-badge]: https://semaphoreci.com/api/v1/mainflux/mainflux/branches/master/badge.svg -[ci-url]: https://semaphoreci.com/mainflux/mainflux +[ci-badge]: https://semaphoreci.com/api/v1/absmach/magistrala/branches/master/badge.svg +[ci-url]: https://semaphoreci.com/absmach/magistrala [docs]: https://docs.mainflux.io [docker]: https://www.docker.com [forum]: https://groups.google.com/forum/#!forum/mainflux -[gitter]: https://gitter.im/mainflux/mainflux?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge +[gitter]: https://gitter.im/absmach/magistrala?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge [gitter-badge]: https://badges.gitter.im/Join%20Chat.svg -[grc-badge]: https://goreportcard.com/badge/github.com/mainflux/mainflux -[grc-url]: https://goreportcard.com/report/github.com/mainflux/mainflux -[cov-badge]: https://codecov.io/gh/mainflux/mainflux/branch/master/graph/badge.svg -[cov-url]: https://codecov.io/gh/mainflux/mainflux +[grc-badge]: https://goreportcard.com/badge/github.com/absmach/magistrala +[grc-url]: https://goreportcard.com/report/github.com/absmach/magistrala +[cov-badge]: https://codecov.io/gh/absmach/magistrala/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/absmach/magistrala [license]: https://img.shields.io/badge/license-Apache%20v2.0-blue.svg [twitter]: https://twitter.com/mainflux [lora]: https://lora-alliance.org/ @@ -180,12 +180,12 @@ By utilizing Mainflux, you actively contribute to its improvement. 
Together, we [agent]: https://github.com/mainflux/agent [export]: https://github.com/mainflux/export [kubernetes]: https://kubernetes.io/ -[rel]: https://github.com/mainflux/mainflux/releases +[rel]: https://github.com/absmach/magistrala/releases [careers]: https://www.mainflux.com/careers.html [lf]: https://www.linuxfoundation.org/ [edgex]: https://www.edgexfoundry.org/ -[company]: https://www.mainflux.com/ -[blog]: https://medium.com/mainflux-iot-platform +[company]: https://abstractmachines.fr +[blog]: https://medium.com/abstract-machines-blog [drasko]: https://github.com/drasko [nikola]: https://github.com/nmarcetic [dejan]: https://github.com/mijicd @@ -193,7 +193,7 @@ By utilizing Mainflux, you actively contribute to its improvement. Together, we [darko]: https://github.com/darkodraskovic [janko]: https://github.com/janko-isidorovic [alex]: https://github.com/anovakovic01 -[dusan]: https://github.com/dusanb94 +[dusan]: https://github.com/dborovcanin [sava]: https://github.com/srados [george]: https://github.com/gesaleh [iva]: https://github.com/blokovi diff --git a/vendor/github.com/mainflux/mainflux/api.go b/vendor/github.com/absmach/magistrala/api.go similarity index 87% rename from vendor/github.com/mainflux/mainflux/api.go rename to vendor/github.com/absmach/magistrala/api.go index 39494f56..33e5eaef 100644 --- a/vendor/github.com/mainflux/mainflux/api.go +++ b/vendor/github.com/absmach/magistrala/api.go @@ -1,7 +1,7 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 -package mainflux +package magistrala // Response contains HTTP response specific methods. 
type Response interface { diff --git a/vendor/github.com/mainflux/mainflux/auth.pb.go b/vendor/github.com/absmach/magistrala/auth.pb.go similarity index 69% rename from vendor/github.com/mainflux/mainflux/auth.pb.go rename to vendor/github.com/absmach/magistrala/auth.pb.go index bc1d45a7..5f37fa98 100644 --- a/vendor/github.com/mainflux/mainflux/auth.pb.go +++ b/vendor/github.com/absmach/magistrala/auth.pb.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Code generated by protoc-gen-go. DO NOT EDIT. @@ -7,7 +7,7 @@ // protoc v4.24.3 // source: auth.proto -package mainflux +package magistrala import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -1473,118 +1473,140 @@ func (x *CountSubjectsRes) GetCount() int64 { var File_auth_proto protoreflect.FileDescriptor var file_auth_proto_rawDesc = []byte{ - 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6d, 0x61, - 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x22, 0x83, 0x01, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x0a, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, - 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x23, 0x0a, 0x0b, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 
0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x1d, 0x0a, 0x0b, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x22, 0x2e, 0x0a, 0x08, 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x22, 0x32, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x22, 0x0a, 0x0a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, - 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x02, 0x0a, 0x0c, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x61, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x22, 0x83, 0x01, 0x0a, 0x05, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, + 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x79, 
0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0f, 0x0a, + 0x0d, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x23, + 0x0a, 0x0b, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x1d, 0x0a, 0x0b, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x22, 0x2e, 0x0a, 0x08, 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0x32, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x22, 0x0a, 0x0a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x02, 0x0a, 0x0c, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 
0x0c, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, + 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3e, 0x0a, 0x0c, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x89, 0x02, 0x0a, 0x0c, 0x41, 0x64, + 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 
0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, + 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x22, 0x8c, 0x02, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 
0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x29, 0x0a, - 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, - 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3e, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x89, 0x02, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, - 0x6f, 0x6c, 
0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x65, 0x64, 0x22, 0x8c, 0x02, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 
0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x22, 0x2b, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, - 0xc7, 0x02, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x52, 0x0a, 0x0e, 0x4c, 0x69, 0x73, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 
0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x02, - 0x0a, 0x0f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x2b, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x22, 0xc7, 0x02, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x52, 0x0a, 0x0e, 0x4c, + 
0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0xb2, 0x02, 0x0a, 0x0f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, + 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc8, 0x02, + 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, @@ -1603,117 +1625,99 @@ var file_auth_proto_rawDesc = []byte{ 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc8, 0x02, 0x0a, 0x0f, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x75, 0x62, 
0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x53, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 
0x65, 0x78, 0x74, 0x50, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb3, 0x02, 0x0a, 0x10, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6c, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, - 
0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x28, 0x0a, 0x10, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x32, 0x4d, 0x0a, 0x0c, 0x41, - 0x75, 0x74, 0x68, 0x7a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, - 0x1a, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x22, 0x00, 0x32, 0xd6, 0x06, 0x0a, 0x0b, 0x41, - 0x75, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x49, 0x73, - 0x73, 0x75, 0x65, 0x12, 0x12, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x49, - 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, 0x12, 0x2e, 0x0a, 0x05, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x12, 0x12, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, - 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x6d, 0x61, - 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, 0x12, 0x3a, - 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12, 0x15, 0x2e, 0x6d, 0x61, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 
0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, - 0x71, 0x1a, 0x15, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x09, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x1a, - 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x09, 0x41, 0x64, 0x64, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, - 0x78, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x16, - 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, - 0x12, 0x43, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, - 0x18, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x6d, 0x61, 0x69, 0x6e, - 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x6c, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x6d, 0x61, 
0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x1a, 0x18, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x46, 0x0a, - 0x0c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x19, 0x2e, - 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, + 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x53, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb3, 0x02, + 0x0a, 0x10, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x28, 0x0a, 0x10, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x32, 0x51, 0x0a, + 0x0c, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, + 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x18, 0x2e, 0x6d, 0x61, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 
0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, + 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x22, 0x00, + 0x32, 0x8a, 0x07, 0x0a, 0x0b, 0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x32, 0x0a, 0x05, 0x49, 0x73, 0x73, 0x75, 0x65, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x1a, + 0x11, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, 0x2e, + 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, + 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x12, 0x16, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, + 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x6d, 0x61, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, + 0x12, 0x3e, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12, 0x17, 0x2e, 0x6d, + 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, + 0x6c, 0x61, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, + 0x12, 0x41, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x18, 0x2e, + 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x6d, 0x61, 0x67, 0x69, 
0x73, 0x74, + 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, + 0x73, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x18, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x41, 0x64, + 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x6d, 0x61, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x6c, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x1a, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, + 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x12, 0x1a, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1a, 0x2e, + 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0e, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x6c, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x2e, + 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1a, 0x2e, 0x6d, 0x61, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0c, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 
0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, + 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x49, 0x0a, - 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x12, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x6d, 0x61, - 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x6d, 0x61, 0x69, 0x6e, - 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1a, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, - 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x69, + 0x73, 
0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, + 0x4d, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, + 0x1b, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x4d, + 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, + 0x1c, 0x2e, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x1c, 0x2e, + 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x42, 0x0e, 0x5a, + 0x0c, 0x2e, 0x2f, 0x6d, 0x61, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x6c, 0x61, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1730,56 +1734,56 @@ func file_auth_proto_rawDescGZIP() []byte { var file_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_auth_proto_goTypes = []interface{}{ - (*Token)(nil), // 0: mainflux.Token - (*IdentityReq)(nil), // 1: mainflux.IdentityReq - (*IdentityRes)(nil), // 2: mainflux.IdentityRes - (*IssueReq)(nil), // 3: mainflux.IssueReq - (*LoginReq)(nil), // 4: mainflux.LoginReq - (*RefreshReq)(nil), // 5: mainflux.RefreshReq - (*AuthorizeReq)(nil), // 6: mainflux.AuthorizeReq - (*AuthorizeRes)(nil), // 7: mainflux.AuthorizeRes - (*AddPolicyReq)(nil), // 8: mainflux.AddPolicyReq - (*AddPolicyRes)(nil), // 9: mainflux.AddPolicyRes - (*DeletePolicyReq)(nil), // 10: mainflux.DeletePolicyReq - (*DeletePolicyRes)(nil), // 11: mainflux.DeletePolicyRes - 
(*ListObjectsReq)(nil), // 12: mainflux.ListObjectsReq - (*ListObjectsRes)(nil), // 13: mainflux.ListObjectsRes - (*CountObjectsReq)(nil), // 14: mainflux.CountObjectsReq - (*CountObjectsRes)(nil), // 15: mainflux.CountObjectsRes - (*ListSubjectsReq)(nil), // 16: mainflux.ListSubjectsReq - (*ListSubjectsRes)(nil), // 17: mainflux.ListSubjectsRes - (*CountSubjectsReq)(nil), // 18: mainflux.CountSubjectsReq - (*CountSubjectsRes)(nil), // 19: mainflux.CountSubjectsRes + (*Token)(nil), // 0: magistrala.Token + (*IdentityReq)(nil), // 1: magistrala.IdentityReq + (*IdentityRes)(nil), // 2: magistrala.IdentityRes + (*IssueReq)(nil), // 3: magistrala.IssueReq + (*LoginReq)(nil), // 4: magistrala.LoginReq + (*RefreshReq)(nil), // 5: magistrala.RefreshReq + (*AuthorizeReq)(nil), // 6: magistrala.AuthorizeReq + (*AuthorizeRes)(nil), // 7: magistrala.AuthorizeRes + (*AddPolicyReq)(nil), // 8: magistrala.AddPolicyReq + (*AddPolicyRes)(nil), // 9: magistrala.AddPolicyRes + (*DeletePolicyReq)(nil), // 10: magistrala.DeletePolicyReq + (*DeletePolicyRes)(nil), // 11: magistrala.DeletePolicyRes + (*ListObjectsReq)(nil), // 12: magistrala.ListObjectsReq + (*ListObjectsRes)(nil), // 13: magistrala.ListObjectsRes + (*CountObjectsReq)(nil), // 14: magistrala.CountObjectsReq + (*CountObjectsRes)(nil), // 15: magistrala.CountObjectsRes + (*ListSubjectsReq)(nil), // 16: magistrala.ListSubjectsReq + (*ListSubjectsRes)(nil), // 17: magistrala.ListSubjectsRes + (*CountSubjectsReq)(nil), // 18: magistrala.CountSubjectsReq + (*CountSubjectsRes)(nil), // 19: magistrala.CountSubjectsRes } var file_auth_proto_depIdxs = []int32{ - 6, // 0: mainflux.AuthzService.Authorize:input_type -> mainflux.AuthorizeReq - 3, // 1: mainflux.AuthService.Issue:input_type -> mainflux.IssueReq - 4, // 2: mainflux.AuthService.Login:input_type -> mainflux.LoginReq - 5, // 3: mainflux.AuthService.Refresh:input_type -> mainflux.RefreshReq - 1, // 4: mainflux.AuthService.Identify:input_type -> mainflux.IdentityReq - 6, // 
5: mainflux.AuthService.Authorize:input_type -> mainflux.AuthorizeReq - 8, // 6: mainflux.AuthService.AddPolicy:input_type -> mainflux.AddPolicyReq - 10, // 7: mainflux.AuthService.DeletePolicy:input_type -> mainflux.DeletePolicyReq - 12, // 8: mainflux.AuthService.ListObjects:input_type -> mainflux.ListObjectsReq - 12, // 9: mainflux.AuthService.ListAllObjects:input_type -> mainflux.ListObjectsReq - 14, // 10: mainflux.AuthService.CountObjects:input_type -> mainflux.CountObjectsReq - 16, // 11: mainflux.AuthService.ListSubjects:input_type -> mainflux.ListSubjectsReq - 16, // 12: mainflux.AuthService.ListAllSubjects:input_type -> mainflux.ListSubjectsReq - 18, // 13: mainflux.AuthService.CountSubjects:input_type -> mainflux.CountSubjectsReq - 7, // 14: mainflux.AuthzService.Authorize:output_type -> mainflux.AuthorizeRes - 0, // 15: mainflux.AuthService.Issue:output_type -> mainflux.Token - 0, // 16: mainflux.AuthService.Login:output_type -> mainflux.Token - 0, // 17: mainflux.AuthService.Refresh:output_type -> mainflux.Token - 2, // 18: mainflux.AuthService.Identify:output_type -> mainflux.IdentityRes - 7, // 19: mainflux.AuthService.Authorize:output_type -> mainflux.AuthorizeRes - 9, // 20: mainflux.AuthService.AddPolicy:output_type -> mainflux.AddPolicyRes - 11, // 21: mainflux.AuthService.DeletePolicy:output_type -> mainflux.DeletePolicyRes - 13, // 22: mainflux.AuthService.ListObjects:output_type -> mainflux.ListObjectsRes - 13, // 23: mainflux.AuthService.ListAllObjects:output_type -> mainflux.ListObjectsRes - 15, // 24: mainflux.AuthService.CountObjects:output_type -> mainflux.CountObjectsRes - 17, // 25: mainflux.AuthService.ListSubjects:output_type -> mainflux.ListSubjectsRes - 17, // 26: mainflux.AuthService.ListAllSubjects:output_type -> mainflux.ListSubjectsRes - 19, // 27: mainflux.AuthService.CountSubjects:output_type -> mainflux.CountSubjectsRes + 6, // 0: magistrala.AuthzService.Authorize:input_type -> magistrala.AuthorizeReq + 3, // 1: 
magistrala.AuthService.Issue:input_type -> magistrala.IssueReq + 4, // 2: magistrala.AuthService.Login:input_type -> magistrala.LoginReq + 5, // 3: magistrala.AuthService.Refresh:input_type -> magistrala.RefreshReq + 1, // 4: magistrala.AuthService.Identify:input_type -> magistrala.IdentityReq + 6, // 5: magistrala.AuthService.Authorize:input_type -> magistrala.AuthorizeReq + 8, // 6: magistrala.AuthService.AddPolicy:input_type -> magistrala.AddPolicyReq + 10, // 7: magistrala.AuthService.DeletePolicy:input_type -> magistrala.DeletePolicyReq + 12, // 8: magistrala.AuthService.ListObjects:input_type -> magistrala.ListObjectsReq + 12, // 9: magistrala.AuthService.ListAllObjects:input_type -> magistrala.ListObjectsReq + 14, // 10: magistrala.AuthService.CountObjects:input_type -> magistrala.CountObjectsReq + 16, // 11: magistrala.AuthService.ListSubjects:input_type -> magistrala.ListSubjectsReq + 16, // 12: magistrala.AuthService.ListAllSubjects:input_type -> magistrala.ListSubjectsReq + 18, // 13: magistrala.AuthService.CountSubjects:input_type -> magistrala.CountSubjectsReq + 7, // 14: magistrala.AuthzService.Authorize:output_type -> magistrala.AuthorizeRes + 0, // 15: magistrala.AuthService.Issue:output_type -> magistrala.Token + 0, // 16: magistrala.AuthService.Login:output_type -> magistrala.Token + 0, // 17: magistrala.AuthService.Refresh:output_type -> magistrala.Token + 2, // 18: magistrala.AuthService.Identify:output_type -> magistrala.IdentityRes + 7, // 19: magistrala.AuthService.Authorize:output_type -> magistrala.AuthorizeRes + 9, // 20: magistrala.AuthService.AddPolicy:output_type -> magistrala.AddPolicyRes + 11, // 21: magistrala.AuthService.DeletePolicy:output_type -> magistrala.DeletePolicyRes + 13, // 22: magistrala.AuthService.ListObjects:output_type -> magistrala.ListObjectsRes + 13, // 23: magistrala.AuthService.ListAllObjects:output_type -> magistrala.ListObjectsRes + 15, // 24: magistrala.AuthService.CountObjects:output_type -> 
magistrala.CountObjectsRes + 17, // 25: magistrala.AuthService.ListSubjects:output_type -> magistrala.ListSubjectsRes + 17, // 26: magistrala.AuthService.ListAllSubjects:output_type -> magistrala.ListSubjectsRes + 19, // 27: magistrala.AuthService.CountSubjects:output_type -> magistrala.CountSubjectsRes 14, // [14:28] is the sub-list for method output_type 0, // [0:14] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name diff --git a/vendor/github.com/mainflux/mainflux/auth.proto b/vendor/github.com/absmach/magistrala/auth.proto similarity index 98% rename from vendor/github.com/mainflux/mainflux/auth.proto rename to vendor/github.com/absmach/magistrala/auth.proto index 0cfc26ec..db97e78a 100644 --- a/vendor/github.com/mainflux/mainflux/auth.proto +++ b/vendor/github.com/absmach/magistrala/auth.proto @@ -1,10 +1,10 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 syntax = "proto3"; -package mainflux; -option go_package = "./mainflux"; +package magistrala; +option go_package = "./magistrala"; // AuthzService is a service that provides authentication and authorization // functionalities for the things service. diff --git a/vendor/github.com/mainflux/mainflux/auth_grpc.pb.go b/vendor/github.com/absmach/magistrala/auth_grpc.pb.go similarity index 94% rename from vendor/github.com/mainflux/mainflux/auth_grpc.pb.go rename to vendor/github.com/absmach/magistrala/auth_grpc.pb.go index 1fac394b..270466b0 100644 --- a/vendor/github.com/mainflux/mainflux/auth_grpc.pb.go +++ b/vendor/github.com/absmach/magistrala/auth_grpc.pb.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
@@ -7,7 +7,7 @@ // - protoc v4.24.3 // source: auth.proto -package mainflux +package magistrala import ( context "context" @@ -22,7 +22,7 @@ import ( const _ = grpc.SupportPackageIsVersion7 const ( - AuthzService_Authorize_FullMethodName = "/mainflux.AuthzService/Authorize" + AuthzService_Authorize_FullMethodName = "/magistrala.AuthzService/Authorize" ) // AuthzServiceClient is the client API for AuthzService service. @@ -103,7 +103,7 @@ func _AuthzService_Authorize_Handler(srv interface{}, ctx context.Context, dec f // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var AuthzService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "mainflux.AuthzService", + ServiceName: "magistrala.AuthzService", HandlerType: (*AuthzServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -116,19 +116,19 @@ var AuthzService_ServiceDesc = grpc.ServiceDesc{ } const ( - AuthService_Issue_FullMethodName = "/mainflux.AuthService/Issue" - AuthService_Login_FullMethodName = "/mainflux.AuthService/Login" - AuthService_Refresh_FullMethodName = "/mainflux.AuthService/Refresh" - AuthService_Identify_FullMethodName = "/mainflux.AuthService/Identify" - AuthService_Authorize_FullMethodName = "/mainflux.AuthService/Authorize" - AuthService_AddPolicy_FullMethodName = "/mainflux.AuthService/AddPolicy" - AuthService_DeletePolicy_FullMethodName = "/mainflux.AuthService/DeletePolicy" - AuthService_ListObjects_FullMethodName = "/mainflux.AuthService/ListObjects" - AuthService_ListAllObjects_FullMethodName = "/mainflux.AuthService/ListAllObjects" - AuthService_CountObjects_FullMethodName = "/mainflux.AuthService/CountObjects" - AuthService_ListSubjects_FullMethodName = "/mainflux.AuthService/ListSubjects" - AuthService_ListAllSubjects_FullMethodName = "/mainflux.AuthService/ListAllSubjects" - AuthService_CountSubjects_FullMethodName = "/mainflux.AuthService/CountSubjects" + AuthService_Issue_FullMethodName = 
"/magistrala.AuthService/Issue" + AuthService_Login_FullMethodName = "/magistrala.AuthService/Login" + AuthService_Refresh_FullMethodName = "/magistrala.AuthService/Refresh" + AuthService_Identify_FullMethodName = "/magistrala.AuthService/Identify" + AuthService_Authorize_FullMethodName = "/magistrala.AuthService/Authorize" + AuthService_AddPolicy_FullMethodName = "/magistrala.AuthService/AddPolicy" + AuthService_DeletePolicy_FullMethodName = "/magistrala.AuthService/DeletePolicy" + AuthService_ListObjects_FullMethodName = "/magistrala.AuthService/ListObjects" + AuthService_ListAllObjects_FullMethodName = "/magistrala.AuthService/ListAllObjects" + AuthService_CountObjects_FullMethodName = "/magistrala.AuthService/CountObjects" + AuthService_ListSubjects_FullMethodName = "/magistrala.AuthService/ListSubjects" + AuthService_ListAllSubjects_FullMethodName = "/magistrala.AuthService/ListAllSubjects" + AuthService_CountSubjects_FullMethodName = "/magistrala.AuthService/CountSubjects" ) // AuthServiceClient is the client API for AuthService service. 
@@ -589,7 +589,7 @@ func _AuthService_CountSubjects_Handler(srv interface{}, ctx context.Context, de // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var AuthService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "mainflux.AuthService", + ServiceName: "magistrala.AuthService", HandlerType: (*AuthServiceServer)(nil), Methods: []grpc.MethodDesc{ { diff --git a/vendor/github.com/mainflux/mainflux/config.toml b/vendor/github.com/absmach/magistrala/config.toml similarity index 100% rename from vendor/github.com/mainflux/mainflux/config.toml rename to vendor/github.com/absmach/magistrala/config.toml diff --git a/vendor/github.com/absmach/magistrala/doc.go b/vendor/github.com/absmach/magistrala/doc.go new file mode 100644 index 00000000..c5b93de6 --- /dev/null +++ b/vendor/github.com/absmach/magistrala/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Magistrala +// SPDX-License-Identifier: Apache-2.0 + +// package magistrala acts as an umbrella package containing multiple different +// microservices and defines all shared domain concepts. +package magistrala diff --git a/vendor/github.com/mainflux/mainflux/health.go b/vendor/github.com/absmach/magistrala/health.go similarity index 88% rename from vendor/github.com/mainflux/mainflux/health.go rename to vendor/github.com/absmach/magistrala/health.go index da50259c..b7324de7 100644 --- a/vendor/github.com/mainflux/mainflux/health.go +++ b/vendor/github.com/absmach/magistrala/health.go @@ -1,7 +1,7 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 -package mainflux +package magistrala import ( "encoding/json" @@ -18,15 +18,15 @@ const ( var ( // Version represents the last service git tag in git history. // It's meant to be set using go build ldflags: - // -ldflags "-X 'github.com/mainflux/mainflux.Version=0.0.0'". + // -ldflags "-X 'github.com/absmach/magistrala.Version=0.0.0'". 
Version = "0.0.0" // Commit represents the service git commit hash. // It's meant to be set using go build ldflags: - // -ldflags "-X 'github.com/mainflux/mainflux.Commit=ffffffff'". + // -ldflags "-X 'github.com/absmach/magistrala.Commit=ffffffff'". Commit = "ffffffff" // BuildTime represetns the service build time. // It's meant to be set using go build ldflags: - // -ldflags "-X 'github.com/mainflux/mainflux.BuildTime=1970-01-01_00:00:00'". + // -ldflags "-X 'github.com/absmach/magistrala.BuildTime=1970-01-01_00:00:00'". BuildTime = "1970-01-01_00:00:00" ) diff --git a/vendor/github.com/mainflux/mainflux/internal/apiutil/errors.go b/vendor/github.com/absmach/magistrala/internal/apiutil/errors.go similarity index 98% rename from vendor/github.com/mainflux/mainflux/internal/apiutil/errors.go rename to vendor/github.com/absmach/magistrala/internal/apiutil/errors.go index 21382a3e..b02c4f37 100644 --- a/vendor/github.com/mainflux/mainflux/internal/apiutil/errors.go +++ b/vendor/github.com/absmach/magistrala/internal/apiutil/errors.go @@ -1,9 +1,9 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package apiutil -import "github.com/mainflux/mainflux/pkg/errors" +import "github.com/absmach/magistrala/pkg/errors" // Errors defined in this file are used by the LoggingErrorEncoder decorator // to distinguish and log API request validation errors and avoid that service diff --git a/vendor/github.com/mainflux/mainflux/internal/apiutil/responses.go b/vendor/github.com/absmach/magistrala/internal/apiutil/responses.go similarity index 87% rename from vendor/github.com/mainflux/mainflux/internal/apiutil/responses.go rename to vendor/github.com/absmach/magistrala/internal/apiutil/responses.go index e531273a..67541c90 100644 --- a/vendor/github.com/mainflux/mainflux/internal/apiutil/responses.go +++ b/vendor/github.com/absmach/magistrala/internal/apiutil/responses.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) 
Magistrala // SPDX-License-Identifier: Apache-2.0 package apiutil diff --git a/vendor/github.com/mainflux/mainflux/internal/apiutil/token.go b/vendor/github.com/absmach/magistrala/internal/apiutil/token.go similarity index 97% rename from vendor/github.com/mainflux/mainflux/internal/apiutil/token.go rename to vendor/github.com/absmach/magistrala/internal/apiutil/token.go index ba10d6ee..a6159ca8 100644 --- a/vendor/github.com/mainflux/mainflux/internal/apiutil/token.go +++ b/vendor/github.com/absmach/magistrala/internal/apiutil/token.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package apiutil diff --git a/vendor/github.com/mainflux/mainflux/internal/apiutil/transport.go b/vendor/github.com/absmach/magistrala/internal/apiutil/transport.go similarity index 96% rename from vendor/github.com/mainflux/mainflux/internal/apiutil/transport.go rename to vendor/github.com/absmach/magistrala/internal/apiutil/transport.go index 25aab93b..58304592 100644 --- a/vendor/github.com/mainflux/mainflux/internal/apiutil/transport.go +++ b/vendor/github.com/absmach/magistrala/internal/apiutil/transport.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package apiutil @@ -9,10 +9,10 @@ import ( "net/http" "strconv" + "github.com/absmach/magistrala/logger" + "github.com/absmach/magistrala/pkg/errors" kithttp "github.com/go-kit/kit/transport/http" "github.com/go-zoo/bone" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" ) // LoggingErrorEncoder is a go-kit error encoder logging decorator. 
diff --git a/vendor/github.com/mainflux/mainflux/logger/doc.go b/vendor/github.com/absmach/magistrala/logger/doc.go similarity index 84% rename from vendor/github.com/mainflux/mainflux/logger/doc.go rename to vendor/github.com/absmach/magistrala/logger/doc.go index 76b5e7df..791beb0c 100644 --- a/vendor/github.com/mainflux/mainflux/logger/doc.go +++ b/vendor/github.com/absmach/magistrala/logger/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Package logger contains logger API definition, wrapper that diff --git a/vendor/github.com/mainflux/mainflux/logger/exit.go b/vendor/github.com/absmach/magistrala/logger/exit.go similarity index 86% rename from vendor/github.com/mainflux/mainflux/logger/exit.go rename to vendor/github.com/absmach/magistrala/logger/exit.go index d6535aa5..ef0c93dc 100644 --- a/vendor/github.com/mainflux/mainflux/logger/exit.go +++ b/vendor/github.com/absmach/magistrala/logger/exit.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package logger diff --git a/vendor/github.com/mainflux/mainflux/logger/level.go b/vendor/github.com/absmach/magistrala/logger/level.go similarity index 97% rename from vendor/github.com/mainflux/mainflux/logger/level.go rename to vendor/github.com/absmach/magistrala/logger/level.go index 8f0818e8..dfc1812d 100644 --- a/vendor/github.com/mainflux/mainflux/logger/level.go +++ b/vendor/github.com/absmach/magistrala/logger/level.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package logger diff --git a/vendor/github.com/mainflux/mainflux/logger/logger.go b/vendor/github.com/absmach/magistrala/logger/logger.go similarity index 98% rename from vendor/github.com/mainflux/mainflux/logger/logger.go rename to vendor/github.com/absmach/magistrala/logger/logger.go index 8ea5fbc8..af95e658 100644 --- 
a/vendor/github.com/mainflux/mainflux/logger/logger.go +++ b/vendor/github.com/absmach/magistrala/logger/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package logger diff --git a/vendor/github.com/mainflux/mainflux/logger/mock.go b/vendor/github.com/absmach/magistrala/logger/mock.go similarity index 93% rename from vendor/github.com/mainflux/mainflux/logger/mock.go rename to vendor/github.com/absmach/magistrala/logger/mock.go index 544dd55d..b15e6937 100644 --- a/vendor/github.com/mainflux/mainflux/logger/mock.go +++ b/vendor/github.com/absmach/magistrala/logger/mock.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package logger diff --git a/vendor/github.com/mainflux/mainflux/pkg/errors/README.md b/vendor/github.com/absmach/magistrala/pkg/errors/README.md similarity index 100% rename from vendor/github.com/mainflux/mainflux/pkg/errors/README.md rename to vendor/github.com/absmach/magistrala/pkg/errors/README.md diff --git a/vendor/github.com/absmach/magistrala/pkg/errors/doc.go b/vendor/github.com/absmach/magistrala/pkg/errors/doc.go new file mode 100644 index 00000000..ef7a644c --- /dev/null +++ b/vendor/github.com/absmach/magistrala/pkg/errors/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Magistrala +// SPDX-License-Identifier: Apache-2.0 + +// Package errors contains Magistrala errors definitions. 
+package errors diff --git a/vendor/github.com/mainflux/mainflux/pkg/errors/errors.go b/vendor/github.com/absmach/magistrala/pkg/errors/errors.go similarity index 97% rename from vendor/github.com/mainflux/mainflux/pkg/errors/errors.go rename to vendor/github.com/absmach/magistrala/pkg/errors/errors.go index 47e0ff05..7f590c9c 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/errors/errors.go +++ b/vendor/github.com/absmach/magistrala/pkg/errors/errors.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package errors @@ -29,7 +29,7 @@ type Error interface { var _ Error = (*customError)(nil) -// customError represents a Mainflux error. +// customError represents a Magistrala error. type customError struct { msg string err Error diff --git a/vendor/github.com/mainflux/mainflux/pkg/errors/sdk_errors.go b/vendor/github.com/absmach/magistrala/pkg/errors/sdk_errors.go similarity index 96% rename from vendor/github.com/mainflux/mainflux/pkg/errors/sdk_errors.go rename to vendor/github.com/absmach/magistrala/pkg/errors/sdk_errors.go index 42d9d9df..884a127c 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/errors/sdk_errors.go +++ b/vendor/github.com/absmach/magistrala/pkg/errors/sdk_errors.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package errors @@ -18,7 +18,7 @@ type errorRes struct { // Failed to read response body. var errRespBody = New("failed to read response body") -// SDKError is an error type for Mainflux SDK. +// SDKError is an error type for Magistrala SDK. 
type SDKError interface { Error StatusCode() int diff --git a/vendor/github.com/mainflux/mainflux/pkg/errors/types.go b/vendor/github.com/absmach/magistrala/pkg/errors/types.go similarity index 98% rename from vendor/github.com/mainflux/mainflux/pkg/errors/types.go rename to vendor/github.com/absmach/magistrala/pkg/errors/types.go index 5d6eeba1..ede8e901 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/errors/types.go +++ b/vendor/github.com/absmach/magistrala/pkg/errors/types.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package errors diff --git a/vendor/github.com/mainflux/mainflux/pkg/messaging/README.md b/vendor/github.com/absmach/magistrala/pkg/messaging/README.md similarity index 100% rename from vendor/github.com/mainflux/mainflux/pkg/messaging/README.md rename to vendor/github.com/absmach/magistrala/pkg/messaging/README.md diff --git a/vendor/github.com/mainflux/mainflux/pkg/messaging/message.pb.go b/vendor/github.com/absmach/magistrala/pkg/messaging/message.pb.go similarity index 98% rename from vendor/github.com/mainflux/mainflux/pkg/messaging/message.pb.go rename to vendor/github.com/absmach/magistrala/pkg/messaging/message.pb.go index 490118cd..80a39df9 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/messaging/message.pb.go +++ b/vendor/github.com/absmach/magistrala/pkg/messaging/message.pb.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Code generated by protoc-gen-go. DO NOT EDIT. @@ -23,7 +23,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Message represents a message emitted by the Mainflux adapters layer. +// Message represents a message emitted by the Magistrala adapters layer. 
type Message struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/github.com/mainflux/mainflux/pkg/messaging/message.proto b/vendor/github.com/absmach/magistrala/pkg/messaging/message.proto similarity index 75% rename from vendor/github.com/mainflux/mainflux/pkg/messaging/message.proto rename to vendor/github.com/absmach/magistrala/pkg/messaging/message.proto index 76e12229..b37b7d3e 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/messaging/message.proto +++ b/vendor/github.com/absmach/magistrala/pkg/messaging/message.proto @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 syntax = "proto3"; @@ -6,7 +6,7 @@ package messaging; option go_package = "./messaging"; -// Message represents a message emitted by the Mainflux adapters layer. +// Message represents a message emitted by the Magistrala adapters layer. message Message { string channel = 1; string subtopic = 2; diff --git a/vendor/github.com/absmach/magistrala/pkg/messaging/pubsub.go b/vendor/github.com/absmach/magistrala/pkg/messaging/pubsub.go new file mode 100644 index 00000000..8bebadd2 --- /dev/null +++ b/vendor/github.com/absmach/magistrala/pkg/messaging/pubsub.go @@ -0,0 +1,80 @@ +// Copyright (c) Magistrala +// SPDX-License-Identifier: Apache-2.0 + +package messaging + +import "context" + +type DeliveryPolicy uint8 + +const ( + // DeliverNewPolicy will only deliver new messages that are sent after the consumer is created. + // This is the default policy. + DeliverNewPolicy DeliveryPolicy = iota + + // DeliverAllPolicy starts delivering messages from the very beginning of a stream. + DeliverAllPolicy +) + +// Publisher specifies message publishing API. +type Publisher interface { + // Publishes message to the stream. + Publish(ctx context.Context, topic string, msg *Message) error + + // Close gracefully closes message publisher's connection. 
+ Close() error +} + +// MessageHandler represents Message handler for Subscriber. +type MessageHandler interface { + // Handle handles messages passed by underlying implementation. + Handle(msg *Message) error + + // Cancel is used for cleanup during unsubscribing and it's optional. + Cancel() error +} + +type SubscriberConfig struct { + ID string + Topic string + Handler MessageHandler + DeliveryPolicy DeliveryPolicy +} + +// Subscriber specifies message subscription API. +type Subscriber interface { + // Subscribe subscribes to the message stream and consumes messages. + Subscribe(ctx context.Context, cfg SubscriberConfig) error + + // Unsubscribe unsubscribes from the message stream and + // stops consuming messages. + Unsubscribe(ctx context.Context, id, topic string) error + + // Close gracefully closes message subscriber's connection. + Close() error +} + +// PubSub represents aggregation interface for publisher and subscriber. +type PubSub interface { + Publisher + Subscriber +} + +// Option represents optional configuration for message broker. +// +// This is used to provide optional configuration parameters to the +// underlying publisher and pubsub implementation so that it can be +// configured to meet the specific needs. +// +// For example, it can be used to set the message prefix so that +// brokers can be used for event sourcing as well as internal message broker. +// Using value of type interface is not recommended but is the most suitable +// for this use case as options should be compiled with respect to the +// underlying broker which can either be RabbitMQ or NATS. +// +// The example below shows how to set the prefix and jetstream stream for NATS. 
+// +// Example: +// +// broker.NewPublisher(ctx, url, broker.Prefix(eventsPrefix), broker.JSStream(js)) +type Option func(vals interface{}) error diff --git a/vendor/github.com/absmach/magistrala/pkg/sdk/go/README.md b/vendor/github.com/absmach/magistrala/pkg/sdk/go/README.md new file mode 100644 index 00000000..f82f782f --- /dev/null +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/README.md @@ -0,0 +1,83 @@ +# Magistrala Go SDK + +Go SDK, a Go driver for Magistrala HTTP API. + +Does both system administration (provisioning) and messaging. + +## Installation + +Import `"github.com/absmach/magistrala/sdk/go"` in your Go package. + +```` +import "github.com/absmach/magistrala/pkg/sdk/go"``` + +Then call SDK Go functions to interact with the system. + +## API Reference + +```go +FUNCTIONS + +func NewMgxSDK(host, port string, tls bool) *MgxSDK + +func (sdk *MgxSDK) Channel(id, token string) (things.Channel, error) + Channel - gets channel by ID + +func (sdk *MgxSDK) Channels(token string) ([]things.Channel, error) + Channels - gets all channels + +func (sdk *MgxSDK) Connect(struct{[]string, []string}, token string) error + Connect - connect things to channels + +func (sdk *MgxSDK) CreateChannel(data, token string) (string, error) + CreateChannel - creates new channel and generates UUID + +func (sdk *MgxSDK) CreateThing(data, token string) (string, error) + CreateThing - creates new thing and generates thing UUID + +func (sdk *MgxSDK) CreateToken(user, pwd string) (string, error) + CreateToken - create user token + +func (sdk *MgxSDK) CreateUser(user, pwd string) error + CreateUser - create user + +func (sdk *MgxSDK) User(pwd string) (user, error) + User - gets user + +func (sdk *MgxSDK) UpdateUser(user, pwd string) error + UpdateUser - update user + +func (sdk *MgxSDK) UpdatePassword(user, pwd string) error + UpdatePassword - update user password + +func (sdk *MgxSDK) DeleteChannel(id, token string) error + DeleteChannel - removes channel + +func (sdk *MgxSDK) 
DeleteThing(id, token string) error + DeleteThing - removes thing + +func (sdk *MgxSDK) DisconnectThing(thingID, chanID, token string) error + DisconnectThing - connect thing to a channel + +func (sdk *MgxSDK) SendMessage(chanID, msg, token string) error + SendMessage - send message on Magistrala channel + +func (sdk *MgxSDK) SetContentType(ct ContentType) error + SetContentType - set message content type. Available options are SenML + JSON, custom JSON and custom binary (octet-stream). + +func (sdk *MgxSDK) Thing(id, token string) (Thing, error) + Thing - gets thing by ID + +func (sdk *MgxSDK) Things(token string) ([]Thing, error) + Things - gets all things + +func (sdk *MgxSDK) UpdateChannel(channel Channel, token string) error + UpdateChannel - update a channel + +func (sdk *MgxSDK) UpdateThing(thing Thing, token string) error + UpdateThing - updates thing by ID + +func (sdk *MgxSDK) Health() (magistrala.Health, error) + Health - things service health check +```` diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/bootstrap.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/bootstrap.go similarity index 86% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/bootstrap.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/bootstrap.go index 26a461a7..4c89b147 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/bootstrap.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/bootstrap.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,8 +9,8 @@ import ( "net/http" "strings" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/internal/apiutil" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -23,10 +23,10 @@ const ( ) // BootstrapConfig represents Configuration entity. It wraps information about external entity -// as well as info about corresponding Mainflux entities. 
-// MFThing represents corresponding Mainflux Thing ID. -// MFKey is key of corresponding Mainflux Thing. -// MFChannels is a list of Mainflux Channels corresponding Mainflux Thing connects to. +// as well as info about corresponding Magistrala entities. +// MGThing represents corresponding Magistrala Thing ID. +// MGKey is key of corresponding Magistrala Thing. +// MGChannels is a list of Magistrala Channels corresponding Magistrala Thing connects to. type BootstrapConfig struct { Channels interface{} `json:"channels,omitempty"` ExternalID string `json:"external_id,omitempty"` @@ -90,7 +90,7 @@ func (ts *BootstrapConfig) UnmarshalJSON(data []byte) error { return nil } -func (sdk mfSDK) AddBootstrap(cfg BootstrapConfig, token string) (string, errors.SDKError) { +func (sdk mgSDK) AddBootstrap(cfg BootstrapConfig, token string) (string, errors.SDKError) { data, err := json.Marshal(cfg) if err != nil { return "", errors.NewSDKError(err) @@ -108,7 +108,7 @@ func (sdk mfSDK) AddBootstrap(cfg BootstrapConfig, token string) (string, errors return id, nil } -func (sdk mfSDK) Bootstraps(pm PageMetadata, token string) (BootstrapPage, errors.SDKError) { +func (sdk mgSDK) Bootstraps(pm PageMetadata, token string) (BootstrapPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.bootstrapURL, configsEndpoint, pm) if err != nil { return BootstrapPage{}, errors.NewSDKError(err) @@ -127,7 +127,7 @@ func (sdk mfSDK) Bootstraps(pm PageMetadata, token string) (BootstrapPage, error return bb, nil } -func (sdk mfSDK) Whitelist(cfg BootstrapConfig, token string) errors.SDKError { +func (sdk mgSDK) Whitelist(cfg BootstrapConfig, token string) errors.SDKError { data, err := json.Marshal(BootstrapConfig{State: cfg.State}) if err != nil { return errors.NewSDKError(err) @@ -144,7 +144,7 @@ func (sdk mfSDK) Whitelist(cfg BootstrapConfig, token string) errors.SDKError { return sdkerr } -func (sdk mfSDK) ViewBootstrap(id, token string) (BootstrapConfig, errors.SDKError) { +func (sdk mgSDK) 
ViewBootstrap(id, token string) (BootstrapConfig, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.bootstrapURL, configsEndpoint, id) _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -160,7 +160,7 @@ func (sdk mfSDK) ViewBootstrap(id, token string) (BootstrapConfig, errors.SDKErr return bc, nil } -func (sdk mfSDK) UpdateBootstrap(cfg BootstrapConfig, token string) errors.SDKError { +func (sdk mgSDK) UpdateBootstrap(cfg BootstrapConfig, token string) errors.SDKError { data, err := json.Marshal(cfg) if err != nil { return errors.NewSDKError(err) @@ -173,7 +173,7 @@ func (sdk mfSDK) UpdateBootstrap(cfg BootstrapConfig, token string) errors.SDKEr return sdkerr } -func (sdk mfSDK) UpdateBootstrapCerts(id, clientCert, clientKey, ca, token string) (BootstrapConfig, errors.SDKError) { +func (sdk mgSDK) UpdateBootstrapCerts(id, clientCert, clientKey, ca, token string) (BootstrapConfig, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.bootstrapURL, bootstrapCertsEndpoint, id) request := BootstrapConfig{ ClientCert: clientCert, @@ -196,7 +196,7 @@ func (sdk mfSDK) UpdateBootstrapCerts(id, clientCert, clientKey, ca, token strin return bc, sdkerr } -func (sdk mfSDK) UpdateBootstrapConnection(id string, channels []string, token string) errors.SDKError { +func (sdk mgSDK) UpdateBootstrapConnection(id string, channels []string, token string) errors.SDKError { url := fmt.Sprintf("%s/%s/%s", sdk.bootstrapURL, bootstrapConnEndpoint, id) request := map[string][]string{ "channels": channels, @@ -210,14 +210,14 @@ func (sdk mfSDK) UpdateBootstrapConnection(id string, channels []string, token s return sdkerr } -func (sdk mfSDK) RemoveBootstrap(id, token string) errors.SDKError { +func (sdk mgSDK) RemoveBootstrap(id, token string) errors.SDKError { url := fmt.Sprintf("%s/%s/%s", sdk.bootstrapURL, configsEndpoint, id) _, _, err := sdk.processRequest(http.MethodDelete, url, token, nil, nil, http.StatusNoContent) return err } -func (sdk 
mfSDK) Bootstrap(externalID, externalKey string) (BootstrapConfig, errors.SDKError) { +func (sdk mgSDK) Bootstrap(externalID, externalKey string) (BootstrapConfig, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.bootstrapURL, bootstrapEndpoint, externalID) _, body, err := sdk.processRequest(http.MethodGet, url, ThingPrefix+externalKey, nil, nil, http.StatusOK) @@ -233,7 +233,7 @@ func (sdk mfSDK) Bootstrap(externalID, externalKey string) (BootstrapConfig, err return bc, nil } -func (sdk mfSDK) BootstrapSecure(externalID, externalKey string) (BootstrapConfig, errors.SDKError) { +func (sdk mgSDK) BootstrapSecure(externalID, externalKey string) (BootstrapConfig, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s/%s", sdk.bootstrapURL, bootstrapEndpoint, secureEndpoint, externalID) _, body, err := sdk.processRequest(http.MethodGet, url, ThingPrefix+externalKey, nil, nil, http.StatusOK) diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/certs.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/certs.go similarity index 86% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/certs.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/certs.go index e7cfd0a3..0932dd4a 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/certs.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/certs.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,7 +9,7 @@ import ( "net/http" "time" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -26,7 +26,7 @@ type Cert struct { Expiration time.Time `json:"expiration,omitempty"` } -func (sdk mfSDK) IssueCert(thingID, valid, token string) (Cert, errors.SDKError) { +func (sdk mgSDK) IssueCert(thingID, valid, token string) (Cert, errors.SDKError) { r := certReq{ ThingID: thingID, Valid: valid, @@ -51,7 +51,7 @@ func (sdk mfSDK) IssueCert(thingID, valid, token string) (Cert, 
errors.SDKError) return c, nil } -func (sdk mfSDK) ViewCert(id, token string) (Cert, errors.SDKError) { +func (sdk mgSDK) ViewCert(id, token string) (Cert, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.certsURL, certsEndpoint, id) _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -67,7 +67,7 @@ func (sdk mfSDK) ViewCert(id, token string) (Cert, errors.SDKError) { return cert, nil } -func (sdk mfSDK) ViewCertByThing(thingID, token string) (CertSerials, errors.SDKError) { +func (sdk mgSDK) ViewCertByThing(thingID, token string) (CertSerials, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.certsURL, serialsEndpoint, thingID) _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -83,7 +83,7 @@ func (sdk mfSDK) ViewCertByThing(thingID, token string) (CertSerials, errors.SDK return cs, nil } -func (sdk mfSDK) RevokeCert(id, token string) (time.Time, errors.SDKError) { +func (sdk mgSDK) RevokeCert(id, token string) (time.Time, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.certsURL, certsEndpoint, id) _, body, err := sdk.processRequest(http.MethodDelete, url, token, nil, nil, http.StatusOK) diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/channels.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/channels.go similarity index 82% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/channels.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/channels.go index 21f3d5df..967e35b7 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/channels.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/channels.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,12 +9,12 @@ import ( "net/http" "time" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const channelsEndpoint = "channels" -// Channel represents mainflux channel. 
+// Channel represents magistrala channel. type Channel struct { ID string `json:"id"` OwnerID string `json:"owner_id,omitempty"` @@ -30,7 +30,7 @@ type Channel struct { Status string `json:"status,omitempty"` } -func (sdk mfSDK) CreateChannel(c Channel, token string) (Channel, errors.SDKError) { +func (sdk mgSDK) CreateChannel(c Channel, token string) (Channel, errors.SDKError) { data, err := json.Marshal(c) if err != nil { return Channel{}, errors.NewSDKError(err) @@ -50,7 +50,7 @@ func (sdk mfSDK) CreateChannel(c Channel, token string) (Channel, errors.SDKErro return c, nil } -func (sdk mfSDK) CreateChannels(chs []Channel, token string) ([]Channel, errors.SDKError) { +func (sdk mgSDK) CreateChannels(chs []Channel, token string) ([]Channel, errors.SDKError) { data, err := json.Marshal(chs) if err != nil { return []Channel{}, errors.NewSDKError(err) @@ -71,7 +71,7 @@ func (sdk mfSDK) CreateChannels(chs []Channel, token string) ([]Channel, errors. return ccr.Channels, nil } -func (sdk mfSDK) Channels(pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { +func (sdk mgSDK) Channels(pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.thingsURL, channelsEndpoint, pm) if err != nil { return ChannelsPage{}, errors.NewSDKError(err) @@ -90,7 +90,7 @@ func (sdk mfSDK) Channels(pm PageMetadata, token string) (ChannelsPage, errors.S return cp, nil } -func (sdk mfSDK) ChannelsByThing(thingID string, pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { +func (sdk mgSDK) ChannelsByThing(thingID string, pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { url, err := sdk.withQueryParams(fmt.Sprintf("%s/things/%s", sdk.thingsURL, thingID), channelsEndpoint, pm) if err != nil { return ChannelsPage{}, errors.NewSDKError(err) @@ -109,7 +109,7 @@ func (sdk mfSDK) ChannelsByThing(thingID string, pm PageMetadata, token string) return cp, nil } -func (sdk mfSDK) Channel(id, token string) (Channel, 
errors.SDKError) { +func (sdk mgSDK) Channel(id, token string) (Channel, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, channelsEndpoint, id) _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -125,7 +125,7 @@ func (sdk mfSDK) Channel(id, token string) (Channel, errors.SDKError) { return c, nil } -func (sdk mfSDK) UpdateChannel(c Channel, token string) (Channel, errors.SDKError) { +func (sdk mgSDK) UpdateChannel(c Channel, token string) (Channel, errors.SDKError) { data, err := json.Marshal(c) if err != nil { return Channel{}, errors.NewSDKError(err) @@ -146,7 +146,7 @@ func (sdk mfSDK) UpdateChannel(c Channel, token string) (Channel, errors.SDKErro return c, nil } -func (sdk mfSDK) AddUserToChannel(channelID string, req UsersRelationRequest, token string) errors.SDKError { +func (sdk mgSDK) AddUserToChannel(channelID string, req UsersRelationRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -158,7 +158,7 @@ func (sdk mfSDK) AddUserToChannel(channelID string, req UsersRelationRequest, to return sdkerr } -func (sdk mfSDK) RemoveUserFromChannel(channelID string, req UsersRelationRequest, token string) errors.SDKError { +func (sdk mgSDK) RemoveUserFromChannel(channelID string, req UsersRelationRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -170,8 +170,8 @@ func (sdk mfSDK) RemoveUserFromChannel(channelID string, req UsersRelationReques return sdkerr } -func (sdk mfSDK) ListChannelUsers(channelID string, pm PageMetadata, token string) (UsersPage, errors.SDKError) { - url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("%s/%s/%s", channelsEndpoint, channelID, usersEndpoint), pm) +func (sdk mgSDK) ListChannelUsers(channelID string, pm PageMetadata, token string) (UsersPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", 
channelsEndpoint, channelID, usersEndpoint), pm) if err != nil { return UsersPage{}, errors.NewSDKError(err) } @@ -187,7 +187,7 @@ func (sdk mfSDK) ListChannelUsers(channelID string, pm PageMetadata, token strin return up, nil } -func (sdk mfSDK) AddUserGroupToChannel(channelID string, req UserGroupsRequest, token string) errors.SDKError { +func (sdk mgSDK) AddUserGroupToChannel(channelID string, req UserGroupsRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -199,7 +199,7 @@ func (sdk mfSDK) AddUserGroupToChannel(channelID string, req UserGroupsRequest, return sdkerr } -func (sdk mfSDK) RemoveUserGroupFromChannel(channelID string, req UserGroupsRequest, token string) errors.SDKError { +func (sdk mgSDK) RemoveUserGroupFromChannel(channelID string, req UserGroupsRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -211,8 +211,8 @@ func (sdk mfSDK) RemoveUserGroupFromChannel(channelID string, req UserGroupsRequ return sdkerr } -func (sdk mfSDK) ListChannelUserGroups(channelID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { - url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("%s/%s/%s", channelsEndpoint, channelID, groupsEndpoint), pm) +func (sdk mgSDK) ListChannelUserGroups(channelID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", channelsEndpoint, channelID, groupsEndpoint), pm) if err != nil { return GroupsPage{}, errors.NewSDKError(err) } @@ -228,7 +228,7 @@ func (sdk mfSDK) ListChannelUserGroups(channelID string, pm PageMetadata, token return gp, nil } -func (sdk mfSDK) Connect(conn Connection, token string) errors.SDKError { +func (sdk mgSDK) Connect(conn Connection, token string) errors.SDKError { data, err := json.Marshal(conn) if err != nil { return errors.NewSDKError(err) @@ -241,7 +241,7 @@ 
func (sdk mfSDK) Connect(conn Connection, token string) errors.SDKError { return sdkerr } -func (sdk mfSDK) Disconnect(connIDs Connection, token string) errors.SDKError { +func (sdk mgSDK) Disconnect(connIDs Connection, token string) errors.SDKError { data, err := json.Marshal(connIDs) if err != nil { return errors.NewSDKError(err) @@ -254,7 +254,7 @@ func (sdk mfSDK) Disconnect(connIDs Connection, token string) errors.SDKError { return sdkerr } -func (sdk mfSDK) ConnectThing(thingID, channelID, token string) errors.SDKError { +func (sdk mgSDK) ConnectThing(thingID, channelID, token string) errors.SDKError { url := fmt.Sprintf("%s/%s/%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, channelID, thingsEndpoint, thingID, connectEndpoint) _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, nil, nil, http.StatusCreated) @@ -262,7 +262,7 @@ func (sdk mfSDK) ConnectThing(thingID, channelID, token string) errors.SDKError return sdkerr } -func (sdk mfSDK) DisconnectThing(thingID, channelID, token string) errors.SDKError { +func (sdk mgSDK) DisconnectThing(thingID, channelID, token string) errors.SDKError { url := fmt.Sprintf("%s/%s/%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, channelID, thingsEndpoint, thingID, disconnectEndpoint) _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, nil, nil, http.StatusNoContent) @@ -270,15 +270,15 @@ func (sdk mfSDK) DisconnectThing(thingID, channelID, token string) errors.SDKErr return sdkerr } -func (sdk mfSDK) EnableChannel(id, token string) (Channel, errors.SDKError) { +func (sdk mgSDK) EnableChannel(id, token string) (Channel, errors.SDKError) { return sdk.changeChannelStatus(id, enableEndpoint, token) } -func (sdk mfSDK) DisableChannel(id, token string) (Channel, errors.SDKError) { +func (sdk mgSDK) DisableChannel(id, token string) (Channel, errors.SDKError) { return sdk.changeChannelStatus(id, disableEndpoint, token) } -func (sdk mfSDK) changeChannelStatus(id, status, token string) (Channel, errors.SDKError) { 
+func (sdk mgSDK) changeChannelStatus(id, status, token string) (Channel, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, id, status) _, body, err := sdk.processRequest(http.MethodPost, url, token, nil, nil, http.StatusOK) diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/consumers.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/consumers.go similarity index 85% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/consumers.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/consumers.go index ec64f8f2..a360f17c 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/consumers.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/consumers.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,7 +9,7 @@ import ( "net/http" "strings" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -23,7 +23,7 @@ type Subscription struct { Contact string `json:"contact,omitempty"` } -func (sdk mfSDK) CreateSubscription(topic, contact, token string) (string, errors.SDKError) { +func (sdk mgSDK) CreateSubscription(topic, contact, token string) (string, errors.SDKError) { sub := Subscription{ Topic: topic, Contact: contact, @@ -45,7 +45,7 @@ func (sdk mfSDK) CreateSubscription(topic, contact, token string) (string, error return id, nil } -func (sdk mfSDK) ListSubscriptions(pm PageMetadata, token string) (SubscriptionPage, errors.SDKError) { +func (sdk mgSDK) ListSubscriptions(pm PageMetadata, token string) (SubscriptionPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, subscriptionEndpoint, pm) if err != nil { return SubscriptionPage{}, errors.NewSDKError(err) @@ -64,7 +64,7 @@ func (sdk mfSDK) ListSubscriptions(pm PageMetadata, token string) (SubscriptionP return sp, nil } -func (sdk mfSDK) ViewSubscription(id, token string) (Subscription, errors.SDKError) { +func 
(sdk mgSDK) ViewSubscription(id, token string) (Subscription, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, subscriptionEndpoint, id) _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -80,7 +80,7 @@ func (sdk mfSDK) ViewSubscription(id, token string) (Subscription, errors.SDKErr return sub, nil } -func (sdk mfSDK) DeleteSubscription(id, token string) errors.SDKError { +func (sdk mgSDK) DeleteSubscription(id, token string) errors.SDKError { url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, subscriptionEndpoint, id) _, _, err := sdk.processRequest(http.MethodDelete, url, token, nil, nil, http.StatusNoContent) diff --git a/vendor/github.com/absmach/magistrala/pkg/sdk/go/doc.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/doc.go new file mode 100644 index 00000000..120f41ea --- /dev/null +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Magistrala +// SPDX-License-Identifier: Apache-2.0 + +// Package sdk contains Magistrala SDK. 
+package sdk diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/groups.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/groups.go similarity index 83% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/groups.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/groups.go index 15d2167a..b9b1fc66 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/groups.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/groups.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,7 +9,7 @@ import ( "net/http" "time" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -37,7 +37,7 @@ type Group struct { Status string `json:"status,omitempty"` } -func (sdk mfSDK) CreateGroup(g Group, token string) (Group, errors.SDKError) { +func (sdk mgSDK) CreateGroup(g Group, token string) (Group, errors.SDKError) { data, err := json.Marshal(g) if err != nil { return Group{}, errors.NewSDKError(err) @@ -57,7 +57,7 @@ func (sdk mfSDK) CreateGroup(g Group, token string) (Group, errors.SDKError) { return g, nil } -func (sdk mfSDK) Groups(pm PageMetadata, token string) (GroupsPage, errors.SDKError) { +func (sdk mgSDK) Groups(pm PageMetadata, token string) (GroupsPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, groupsEndpoint, pm) if err != nil { return GroupsPage{}, errors.NewSDKError(err) @@ -66,7 +66,7 @@ func (sdk mfSDK) Groups(pm PageMetadata, token string) (GroupsPage, errors.SDKEr return sdk.getGroups(url, token) } -func (sdk mfSDK) Parents(id string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { +func (sdk mgSDK) Parents(id string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { pm.Level = MaxLevel url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, id), "parents", pm) if err != nil { @@ -76,7 +76,7 @@ func (sdk mfSDK) Parents(id string, pm 
PageMetadata, token string) (GroupsPage, return sdk.getGroups(url, token) } -func (sdk mfSDK) Children(id string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { +func (sdk mgSDK) Children(id string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { pm.Level = MaxLevel url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, id), "children", pm) if err != nil { @@ -86,7 +86,7 @@ func (sdk mfSDK) Children(id string, pm PageMetadata, token string) (GroupsPage, return sdk.getGroups(url, token) } -func (sdk mfSDK) getGroups(url, token string) (GroupsPage, errors.SDKError) { +func (sdk mgSDK) getGroups(url, token string) (GroupsPage, errors.SDKError) { _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) if err != nil { return GroupsPage{}, err @@ -100,7 +100,7 @@ func (sdk mfSDK) getGroups(url, token string) (GroupsPage, errors.SDKError) { return tp, nil } -func (sdk mfSDK) Group(id, token string) (Group, errors.SDKError) { +func (sdk mgSDK) Group(id, token string) (Group, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, id) _, body, err := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -116,7 +116,7 @@ func (sdk mfSDK) Group(id, token string) (Group, errors.SDKError) { return t, nil } -func (sdk mfSDK) UpdateGroup(g Group, token string) (Group, errors.SDKError) { +func (sdk mgSDK) UpdateGroup(g Group, token string) (Group, errors.SDKError) { data, err := json.Marshal(g) if err != nil { return Group{}, errors.NewSDKError(err) @@ -137,15 +137,15 @@ func (sdk mfSDK) UpdateGroup(g Group, token string) (Group, errors.SDKError) { return g, nil } -func (sdk mfSDK) EnableGroup(id, token string) (Group, errors.SDKError) { +func (sdk mgSDK) EnableGroup(id, token string) (Group, errors.SDKError) { return sdk.changeGroupStatus(id, enableEndpoint, token) } -func (sdk mfSDK) DisableGroup(id, token string) (Group, errors.SDKError) { 
+func (sdk mgSDK) DisableGroup(id, token string) (Group, errors.SDKError) { return sdk.changeGroupStatus(id, disableEndpoint, token) } -func (sdk mfSDK) AddUserToGroup(groupID string, req UsersRelationRequest, token string) errors.SDKError { +func (sdk mgSDK) AddUserToGroup(groupID string, req UsersRelationRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -157,7 +157,7 @@ func (sdk mfSDK) AddUserToGroup(groupID string, req UsersRelationRequest, token return sdkerr } -func (sdk mfSDK) RemoveUserFromGroup(groupID string, req UsersRelationRequest, token string) errors.SDKError { +func (sdk mgSDK) RemoveUserFromGroup(groupID string, req UsersRelationRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -169,7 +169,7 @@ func (sdk mfSDK) RemoveUserFromGroup(groupID string, req UsersRelationRequest, t return sdkerr } -func (sdk mfSDK) ListGroupUsers(groupID string, pm PageMetadata, token string) (UsersPage, errors.SDKError) { +func (sdk mgSDK) ListGroupUsers(groupID string, pm PageMetadata, token string) (UsersPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", groupsEndpoint, groupID, usersEndpoint), pm) if err != nil { return UsersPage{}, errors.NewSDKError(err) @@ -186,8 +186,8 @@ func (sdk mfSDK) ListGroupUsers(groupID string, pm PageMetadata, token string) ( return up, nil } -func (sdk mfSDK) ListGroupChannels(groupID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { - url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", groupsEndpoint, groupID, channelsEndpoint), pm) +func (sdk mgSDK) ListGroupChannels(groupID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("%s/%s/%s", groupsEndpoint, groupID, channelsEndpoint), pm) if err != nil { return GroupsPage{}, 
errors.NewSDKError(err) } @@ -203,7 +203,7 @@ func (sdk mfSDK) ListGroupChannels(groupID string, pm PageMetadata, token string return gp, nil } -func (sdk mfSDK) changeGroupStatus(id, status, token string) (Group, errors.SDKError) { +func (sdk mgSDK) changeGroupStatus(id, status, token string) (Group, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s/%s", sdk.usersURL, groupsEndpoint, id, status) _, body, err := sdk.processRequest(http.MethodPost, url, token, nil, nil, http.StatusOK) diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/health.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/health.go similarity index 91% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/health.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/health.go index 6d218ca5..1c732c03 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/health.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/health.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -8,7 +8,7 @@ import ( "fmt" "net/http" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) // HealthInfo contains version endpoint response. 
@@ -29,7 +29,7 @@ type HealthInfo struct { BuildTime string `json:"build_time"` } -func (sdk mfSDK) Health(service string) (HealthInfo, errors.SDKError) { +func (sdk mgSDK) Health(service string) (HealthInfo, errors.SDKError) { var url string switch service { case "things": diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/message.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/message.go similarity index 82% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/message.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/message.go index bdb2cd73..30f5a633 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/message.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/message.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,13 +9,13 @@ import ( "net/http" "strings" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/internal/apiutil" + "github.com/absmach/magistrala/pkg/errors" ) const channelParts = 2 -func (sdk mfSDK) SendMessage(chanName, msg, key string) errors.SDKError { +func (sdk mgSDK) SendMessage(chanName, msg, key string) errors.SDKError { chanNameParts := strings.SplitN(chanName, ".", channelParts) chanID := chanNameParts[0] subtopicPart := "" @@ -30,7 +30,7 @@ func (sdk mfSDK) SendMessage(chanName, msg, key string) errors.SDKError { return err } -func (sdk mfSDK) ReadMessages(chanName, token string) (MessagesPage, errors.SDKError) { +func (sdk mgSDK) ReadMessages(chanName, token string) (MessagesPage, errors.SDKError) { chanNameParts := strings.SplitN(chanName, ".", channelParts) chanID := chanNameParts[0] subtopicPart := "" @@ -56,7 +56,7 @@ func (sdk mfSDK) ReadMessages(chanName, token string) (MessagesPage, errors.SDKE return mp, nil } -func (sdk *mfSDK) SetContentType(ct ContentType) errors.SDKError { +func (sdk *mgSDK) SetContentType(ct ContentType) 
errors.SDKError { if ct != CTJSON && ct != CTJSONSenML && ct != CTBinary { return errors.NewSDKError(apiutil.ErrUnsupportedContentType) } diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/metadata.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/metadata.go similarity index 76% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/metadata.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/metadata.go index 84bbeabf..f9b5360b 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/metadata.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/metadata.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/requests.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/requests.go similarity index 98% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/requests.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/requests.go index aabe9ce0..6cd714b8 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/requests.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/requests.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/responses.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/responses.go similarity index 95% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/responses.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/responses.go index cb3a61a4..63d2f095 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/responses.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/responses.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -6,7 +6,7 @@ package sdk import ( "time" - "github.com/mainflux/mainflux/pkg/transformers/senml" + 
"github.com/absmach/magistrala/pkg/transformers/senml" ) type createThingsRes struct { diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/sdk.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/sdk.go similarity index 98% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/sdk.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/sdk.go index 0df01ec1..7fe3f55b 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/sdk.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/sdk.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -15,7 +15,7 @@ import ( "strings" "time" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -42,7 +42,7 @@ const ( // ContentType represents all possible content types. type ContentType string -var _ SDK = (*mfSDK)(nil) +var _ SDK = (*mgSDK)(nil) var ( // ErrFailedCreation indicates that entity creation failed. @@ -101,9 +101,9 @@ type Credentials struct { Secret string `json:"secret,omitempty"` // password or token } -// SDK contains Mainflux API. +// SDK contains Magistrala API. type SDK interface { - // CreateUser registers mainflux user. + // CreateUser registers magistrala user. // // example: // user := sdk.User{ @@ -960,7 +960,7 @@ type SDK interface { DeleteSubscription(id, token string) errors.SDKError } -type mfSDK struct { +type mgSDK struct { bootstrapURL string certsURL string httpAdapterURL string @@ -987,9 +987,9 @@ type Config struct { TLSVerification bool } -// NewSDK returns new mainflux SDK instance. +// NewSDK returns new magistrala SDK instance. func NewSDK(conf Config) SDK { - return &mfSDK{ + return &mgSDK{ bootstrapURL: conf.BootstrapURL, certsURL: conf.CertsURL, httpAdapterURL: conf.HTTPAdapterURL, @@ -1011,7 +1011,7 @@ func NewSDK(conf Config) SDK { // processRequest creates and send a new HTTP request, and checks for errors in the HTTP response. 
// It then returns the response headers, the response body, and the associated error(s) (if any). -func (sdk mfSDK) processRequest(method, url, token string, data []byte, headers map[string]string, expectedRespCodes ...int) (http.Header, []byte, errors.SDKError) { +func (sdk mgSDK) processRequest(method, url, token string, data []byte, headers map[string]string, expectedRespCodes ...int) (http.Header, []byte, errors.SDKError) { req, err := http.NewRequest(method, url, bytes.NewReader(data)) if err != nil { return make(http.Header), []byte{}, errors.NewSDKError(err) @@ -1051,7 +1051,7 @@ func (sdk mfSDK) processRequest(method, url, token string, data []byte, headers return resp.Header, body, nil } -func (sdk mfSDK) withQueryParams(baseURL, endpoint string, pm PageMetadata) (string, error) { +func (sdk mgSDK) withQueryParams(baseURL, endpoint string, pm PageMetadata) (string, error) { q, err := pm.query() if err != nil { return "", err diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/things.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/things.go similarity index 85% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/things.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/things.go index b564d43c..94ac9834 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/things.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/things.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,7 +9,7 @@ import ( "net/http" "time" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -21,7 +21,7 @@ const ( unshareEndpoint = "unshare" ) -// Thing represents mainflux thing. +// Thing represents magistrala thing. 
type Thing struct { ID string `json:"id"` Name string `json:"name,omitempty"` @@ -34,7 +34,7 @@ type Thing struct { Status string `json:"status,omitempty"` } -func (sdk mfSDK) CreateThing(thing Thing, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) CreateThing(thing Thing, token string) (Thing, errors.SDKError) { data, err := json.Marshal(thing) if err != nil { return Thing{}, errors.NewSDKError(err) @@ -55,7 +55,7 @@ func (sdk mfSDK) CreateThing(thing Thing, token string) (Thing, errors.SDKError) return thing, nil } -func (sdk mfSDK) CreateThings(things []Thing, token string) ([]Thing, errors.SDKError) { +func (sdk mgSDK) CreateThings(things []Thing, token string) ([]Thing, errors.SDKError) { data, err := json.Marshal(things) if err != nil { return []Thing{}, errors.NewSDKError(err) @@ -76,7 +76,7 @@ func (sdk mfSDK) CreateThings(things []Thing, token string) ([]Thing, errors.SDK return ctr.Things, nil } -func (sdk mfSDK) Things(pm PageMetadata, token string) (ThingsPage, errors.SDKError) { +func (sdk mgSDK) Things(pm PageMetadata, token string) (ThingsPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.thingsURL, thingsEndpoint, pm) if err != nil { return ThingsPage{}, errors.NewSDKError(err) @@ -95,7 +95,7 @@ func (sdk mfSDK) Things(pm PageMetadata, token string) (ThingsPage, errors.SDKEr return cp, nil } -func (sdk mfSDK) ThingsByChannel(chanID string, pm PageMetadata, token string) (ThingsPage, errors.SDKError) { +func (sdk mgSDK) ThingsByChannel(chanID string, pm PageMetadata, token string) (ThingsPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("channels/%s/%s", chanID, thingsEndpoint), pm) if err != nil { return ThingsPage{}, errors.NewSDKError(err) @@ -114,7 +114,7 @@ func (sdk mfSDK) ThingsByChannel(chanID string, pm PageMetadata, token string) ( return tp, nil } -func (sdk mfSDK) Thing(id, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) Thing(id, token string) (Thing, errors.SDKError) { 
url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, thingsEndpoint, id) _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -130,7 +130,7 @@ func (sdk mfSDK) Thing(id, token string) (Thing, errors.SDKError) { return t, nil } -func (sdk mfSDK) UpdateThing(t Thing, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) UpdateThing(t Thing, token string) (Thing, errors.SDKError) { data, err := json.Marshal(t) if err != nil { return Thing{}, errors.NewSDKError(err) @@ -151,7 +151,7 @@ func (sdk mfSDK) UpdateThing(t Thing, token string) (Thing, errors.SDKError) { return t, nil } -func (sdk mfSDK) UpdateThingTags(t Thing, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) UpdateThingTags(t Thing, token string) (Thing, errors.SDKError) { data, err := json.Marshal(t) if err != nil { return Thing{}, errors.NewSDKError(err) @@ -172,7 +172,7 @@ func (sdk mfSDK) UpdateThingTags(t Thing, token string) (Thing, errors.SDKError) return t, nil } -func (sdk mfSDK) UpdateThingSecret(id, secret, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) UpdateThingSecret(id, secret, token string) (Thing, errors.SDKError) { ucsr := updateThingSecretReq{Secret: secret} data, err := json.Marshal(ucsr) @@ -195,7 +195,7 @@ func (sdk mfSDK) UpdateThingSecret(id, secret, token string) (Thing, errors.SDKE return t, nil } -func (sdk mfSDK) UpdateThingOwner(t Thing, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) UpdateThingOwner(t Thing, token string) (Thing, errors.SDKError) { data, err := json.Marshal(t) if err != nil { return Thing{}, errors.NewSDKError(err) @@ -216,15 +216,15 @@ func (sdk mfSDK) UpdateThingOwner(t Thing, token string) (Thing, errors.SDKError return t, nil } -func (sdk mfSDK) EnableThing(id, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) EnableThing(id, token string) (Thing, errors.SDKError) { return sdk.changeThingStatus(id, enableEndpoint, token) } -func (sdk mfSDK) DisableThing(id, token string) 
(Thing, errors.SDKError) { +func (sdk mgSDK) DisableThing(id, token string) (Thing, errors.SDKError) { return sdk.changeThingStatus(id, disableEndpoint, token) } -func (sdk mfSDK) changeThingStatus(id, status, token string) (Thing, errors.SDKError) { +func (sdk mgSDK) changeThingStatus(id, status, token string) (Thing, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s/%s", sdk.thingsURL, thingsEndpoint, id, status) _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, nil, nil, http.StatusOK) @@ -240,7 +240,7 @@ func (sdk mfSDK) changeThingStatus(id, status, token string) (Thing, errors.SDKE return t, nil } -func (sdk mfSDK) IdentifyThing(key string) (string, errors.SDKError) { +func (sdk mgSDK) IdentifyThing(key string) (string, errors.SDKError) { url := fmt.Sprintf("%s/%s", sdk.thingsURL, identifyEndpoint) _, body, sdkerr := sdk.processRequest(http.MethodPost, url, ThingPrefix+key, nil, nil, http.StatusOK) @@ -256,7 +256,7 @@ func (sdk mfSDK) IdentifyThing(key string) (string, errors.SDKError) { return i.ID, nil } -func (sdk mfSDK) ShareThing(thingID string, req UsersRelationRequest, token string) errors.SDKError { +func (sdk mgSDK) ShareThing(thingID string, req UsersRelationRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return errors.NewSDKError(err) @@ -264,11 +264,11 @@ func (sdk mfSDK) ShareThing(thingID string, req UsersRelationRequest, token stri url := fmt.Sprintf("%s/%s/%s/%s", sdk.thingsURL, thingsEndpoint, thingID, shareEndpoint) - _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, data, nil, http.StatusOK) + _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, data, nil, http.StatusCreated) return sdkerr } -func (sdk mfSDK) UnshareThing(thingID string, req UsersRelationRequest, token string) errors.SDKError { +func (sdk mgSDK) UnshareThing(thingID string, req UsersRelationRequest, token string) errors.SDKError { data, err := json.Marshal(req) if err != nil { return 
errors.NewSDKError(err) @@ -276,11 +276,11 @@ func (sdk mfSDK) UnshareThing(thingID string, req UsersRelationRequest, token st url := fmt.Sprintf("%s/%s/%s/%s", sdk.thingsURL, thingsEndpoint, thingID, unshareEndpoint) - _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, data, nil, http.StatusOK) + _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, data, nil, http.StatusNoContent) return sdkerr } -func (sdk mfSDK) ListThingUsers(thingID string, pm PageMetadata, token string) (UsersPage, errors.SDKError) { +func (sdk mgSDK) ListThingUsers(thingID string, pm PageMetadata, token string) (UsersPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", thingsEndpoint, thingID, usersEndpoint), pm) if err != nil { return UsersPage{}, errors.NewSDKError(err) diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/tokens.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/tokens.go similarity index 86% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/tokens.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/tokens.go index 476cb54c..634f7179 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/tokens.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/tokens.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -8,7 +8,7 @@ import ( "fmt" "net/http" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) // Token is used for authentication purposes. 
@@ -19,7 +19,7 @@ type Token struct { AccessType string `json:"access_type,omitempty"` } -func (sdk mfSDK) CreateToken(user User) (Token, errors.SDKError) { +func (sdk mgSDK) CreateToken(user User) (Token, errors.SDKError) { treq := tokenReq{ Identity: user.Credentials.Identity, Secret: user.Credentials.Secret, @@ -43,7 +43,7 @@ func (sdk mfSDK) CreateToken(user User) (Token, errors.SDKError) { return token, nil } -func (sdk mfSDK) RefreshToken(token string) (Token, errors.SDKError) { +func (sdk mgSDK) RefreshToken(token string) (Token, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, refreshTokenEndpoint) _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, []byte{}, nil, http.StatusCreated) diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/users.go b/vendor/github.com/absmach/magistrala/pkg/sdk/go/users.go similarity index 83% rename from vendor/github.com/mainflux/mainflux/pkg/sdk/go/users.go rename to vendor/github.com/absmach/magistrala/pkg/sdk/go/users.go index 9a895e42..5702dc50 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/users.go +++ b/vendor/github.com/absmach/magistrala/pkg/sdk/go/users.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package sdk @@ -9,7 +9,7 @@ import ( "net/http" "time" - "github.com/mainflux/mainflux/pkg/errors" + "github.com/absmach/magistrala/pkg/errors" ) const ( @@ -24,7 +24,7 @@ const ( PasswordResetEndpoint = "password" ) -// User represents mainflux user its credentials. +// User represents magistrala user its credentials. 
type User struct { ID string `json:"id"` Name string `json:"name,omitempty"` @@ -38,7 +38,7 @@ type User struct { Role string `json:"role,omitempty"` } -func (sdk mfSDK) CreateUser(user User, token string) (User, errors.SDKError) { +func (sdk mgSDK) CreateUser(user User, token string) (User, errors.SDKError) { data, err := json.Marshal(user) if err != nil { return User{}, errors.NewSDKError(err) @@ -59,7 +59,7 @@ func (sdk mfSDK) CreateUser(user User, token string) (User, errors.SDKError) { return user, nil } -func (sdk mfSDK) Users(pm PageMetadata, token string) (UsersPage, errors.SDKError) { +func (sdk mgSDK) Users(pm PageMetadata, token string) (UsersPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, usersEndpoint, pm) if err != nil { return UsersPage{}, errors.NewSDKError(err) @@ -78,7 +78,7 @@ func (sdk mfSDK) Users(pm PageMetadata, token string) (UsersPage, errors.SDKErro return cp, nil } -func (sdk mfSDK) Members(groupID string, meta PageMetadata, token string) (MembersPage, errors.SDKError) { +func (sdk mgSDK) Members(groupID string, meta PageMetadata, token string) (MembersPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", groupsEndpoint, groupID, membersEndpoint), meta) if err != nil { return MembersPage{}, errors.NewSDKError(err) @@ -97,7 +97,7 @@ func (sdk mfSDK) Members(groupID string, meta PageMetadata, token string) (Membe return mp, nil } -func (sdk mfSDK) User(id, token string) (User, errors.SDKError) { +func (sdk mgSDK) User(id, token string) (User, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, id) _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -113,7 +113,7 @@ func (sdk mfSDK) User(id, token string) (User, errors.SDKError) { return user, nil } -func (sdk mfSDK) UserProfile(token string) (User, errors.SDKError) { +func (sdk mgSDK) UserProfile(token string) (User, errors.SDKError) { url := 
fmt.Sprintf("%s/%s/profile", sdk.usersURL, usersEndpoint) _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, nil, nil, http.StatusOK) @@ -129,7 +129,7 @@ func (sdk mfSDK) UserProfile(token string) (User, errors.SDKError) { return user, nil } -func (sdk mfSDK) UpdateUser(user User, token string) (User, errors.SDKError) { +func (sdk mgSDK) UpdateUser(user User, token string) (User, errors.SDKError) { data, err := json.Marshal(user) if err != nil { return User{}, errors.NewSDKError(err) @@ -150,7 +150,7 @@ func (sdk mfSDK) UpdateUser(user User, token string) (User, errors.SDKError) { return user, nil } -func (sdk mfSDK) UpdateUserTags(user User, token string) (User, errors.SDKError) { +func (sdk mgSDK) UpdateUserTags(user User, token string) (User, errors.SDKError) { data, err := json.Marshal(user) if err != nil { return User{}, errors.NewSDKError(err) @@ -171,7 +171,7 @@ func (sdk mfSDK) UpdateUserTags(user User, token string) (User, errors.SDKError) return user, nil } -func (sdk mfSDK) UpdateUserIdentity(user User, token string) (User, errors.SDKError) { +func (sdk mgSDK) UpdateUserIdentity(user User, token string) (User, errors.SDKError) { ucir := updateClientIdentityReq{token: token, id: user.ID, Identity: user.Credentials.Identity} data, err := json.Marshal(ucir) @@ -194,14 +194,14 @@ func (sdk mfSDK) UpdateUserIdentity(user User, token string) (User, errors.SDKEr return user, nil } -func (sdk mfSDK) ResetPasswordRequest(email string) errors.SDKError { +func (sdk mgSDK) ResetPasswordRequest(email string) errors.SDKError { rpr := resetPasswordRequestreq{Email: email} data, err := json.Marshal(rpr) if err != nil { return errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s/password/reset", sdk.usersURL, PasswordResetEndpoint) + url := fmt.Sprintf("%s/%s/reset-request", sdk.usersURL, PasswordResetEndpoint) header := make(map[string]string) header["Referer"] = sdk.HostURL @@ -211,7 +211,7 @@ func (sdk mfSDK) ResetPasswordRequest(email string) 
errors.SDKError { return sdkerr } -func (sdk mfSDK) ResetPassword(password, confPass, token string) errors.SDKError { +func (sdk mgSDK) ResetPassword(password, confPass, token string) errors.SDKError { rpr := resetPasswordReq{Token: token, Password: password, ConfPass: confPass} data, err := json.Marshal(rpr) @@ -225,7 +225,7 @@ func (sdk mfSDK) ResetPassword(password, confPass, token string) errors.SDKError return sdkerr } -func (sdk mfSDK) UpdatePassword(oldPass, newPass, token string) (User, errors.SDKError) { +func (sdk mgSDK) UpdatePassword(oldPass, newPass, token string) (User, errors.SDKError) { ucsr := updateClientSecretReq{OldSecret: oldPass, NewSecret: newPass} data, err := json.Marshal(ucsr) @@ -248,7 +248,7 @@ func (sdk mfSDK) UpdatePassword(oldPass, newPass, token string) (User, errors.SD return user, nil } -func (sdk mfSDK) UpdateUserOwner(user User, token string) (User, errors.SDKError) { +func (sdk mgSDK) UpdateUserOwner(user User, token string) (User, errors.SDKError) { data, err := json.Marshal(user) if err != nil { return User{}, errors.NewSDKError(err) @@ -269,8 +269,8 @@ func (sdk mfSDK) UpdateUserOwner(user User, token string) (User, errors.SDKError return user, nil } -func (sdk mfSDK) ListUserChannels(userID string, pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { - url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", usersEndpoint, userID, channelsEndpoint), pm) +func (sdk mgSDK) ListUserChannels(userID string, pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("%s/%s/%s", usersEndpoint, userID, channelsEndpoint), pm) if err != nil { return ChannelsPage{}, errors.NewSDKError(err) } @@ -287,7 +287,7 @@ func (sdk mfSDK) ListUserChannels(userID string, pm PageMetadata, token string) return cp, nil } -func (sdk mfSDK) ListUserGroups(userID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { +func (sdk mgSDK) 
ListUserGroups(userID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", usersEndpoint, userID, groupsEndpoint), pm) if err != nil { return GroupsPage{}, errors.NewSDKError(err) @@ -304,8 +304,8 @@ func (sdk mfSDK) ListUserGroups(userID string, pm PageMetadata, token string) (G return gp, nil } -func (sdk mfSDK) ListUserThings(userID string, pm PageMetadata, token string) (ThingsPage, errors.SDKError) { - url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", usersEndpoint, userID, thingsEndpoint), pm) +func (sdk mgSDK) ListUserThings(userID string, pm PageMetadata, token string) (ThingsPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("%s/%s/%s", usersEndpoint, userID, thingsEndpoint), pm) if err != nil { return ThingsPage{}, errors.NewSDKError(err) } @@ -321,15 +321,15 @@ func (sdk mfSDK) ListUserThings(userID string, pm PageMetadata, token string) (T return tp, nil } -func (sdk mfSDK) EnableUser(id, token string) (User, errors.SDKError) { +func (sdk mgSDK) EnableUser(id, token string) (User, errors.SDKError) { return sdk.changeClientStatus(token, id, enableEndpoint) } -func (sdk mfSDK) DisableUser(id, token string) (User, errors.SDKError) { +func (sdk mgSDK) DisableUser(id, token string) (User, errors.SDKError) { return sdk.changeClientStatus(token, id, disableEndpoint) } -func (sdk mfSDK) changeClientStatus(token, id, status string) (User, errors.SDKError) { +func (sdk mgSDK) changeClientStatus(token, id, status string) (User, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s/%s", sdk.usersURL, usersEndpoint, id, status) _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, nil, nil, http.StatusOK) diff --git a/vendor/github.com/absmach/magistrala/pkg/transformers/README.md b/vendor/github.com/absmach/magistrala/pkg/transformers/README.md new file mode 100644 index 00000000..44a21202 --- /dev/null +++ 
b/vendor/github.com/absmach/magistrala/pkg/transformers/README.md @@ -0,0 +1,10 @@ +# Message Transformers + +A transformer service consumes events published by Magistrala adapters (such as MQTT and HTTP adapters) and transforms them to an arbitrary message format. A transformer can be imported as a standalone package and used for message transformation on the consumer side. + +Magistrala [SenML transformer](transformer) is an example of Transformer service for SenML messages. + +Magistrala [writers](writers) are using a standalone SenML transformer to preprocess messages before storing them. + +[transformers]: https://github.com/absmach/magistrala/tree/master/transformers/senml +[writers]: https://github.com/absmach/magistrala/tree/master/writers diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/doc.go b/vendor/github.com/absmach/magistrala/pkg/transformers/doc.go similarity index 61% rename from vendor/github.com/mainflux/mainflux/pkg/transformers/doc.go rename to vendor/github.com/absmach/magistrala/pkg/transformers/doc.go index dd36cb7f..80050df3 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/transformers/doc.go +++ b/vendor/github.com/absmach/magistrala/pkg/transformers/doc.go @@ -1,6 +1,6 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Package transformers contains the domain concept definitions needed to -// support Mainflux transformer services functionality. +// support Magistrala transformer services functionality. package transformers diff --git a/vendor/github.com/absmach/magistrala/pkg/transformers/senml/README.md b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/README.md new file mode 100644 index 00000000..d5dbd00e --- /dev/null +++ b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/README.md @@ -0,0 +1,4 @@ +# SenML Message Transformer + +SenML Transformer provides Message Transformer for SenML messages. 
+It supports JSON and CBOR content types - To transform Magistrala Message successfully, the payload must be either JSON or CBOR encoded SenML message. diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/doc.go b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/doc.go similarity index 77% rename from vendor/github.com/mainflux/mainflux/pkg/transformers/senml/doc.go rename to vendor/github.com/absmach/magistrala/pkg/transformers/senml/doc.go index 6e6e0679..04234dd4 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/doc.go +++ b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Package senml contains SenML transformer. diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/message.go b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/message.go similarity index 100% rename from vendor/github.com/mainflux/mainflux/pkg/transformers/senml/message.go rename to vendor/github.com/absmach/magistrala/pkg/transformers/senml/message.go diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/transformer.go b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/transformer.go similarity index 91% rename from vendor/github.com/mainflux/mainflux/pkg/transformers/senml/transformer.go rename to vendor/github.com/absmach/magistrala/pkg/transformers/senml/transformer.go index 00c86375..f3fe1355 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/transformer.go +++ b/vendor/github.com/absmach/magistrala/pkg/transformers/senml/transformer.go @@ -1,12 +1,12 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package senml import ( - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/messaging" - "github.com/mainflux/mainflux/pkg/transformers" + 
"github.com/absmach/magistrala/pkg/errors" + "github.com/absmach/magistrala/pkg/messaging" + "github.com/absmach/magistrala/pkg/transformers" "github.com/mainflux/senml" ) diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/transformer.go b/vendor/github.com/absmach/magistrala/pkg/transformers/transformer.go similarity index 60% rename from vendor/github.com/mainflux/mainflux/pkg/transformers/transformer.go rename to vendor/github.com/absmach/magistrala/pkg/transformers/transformer.go index 82b0cfbc..60797f57 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/transformers/transformer.go +++ b/vendor/github.com/absmach/magistrala/pkg/transformers/transformer.go @@ -1,12 +1,12 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package transformers -import "github.com/mainflux/mainflux/pkg/messaging" +import "github.com/absmach/magistrala/pkg/messaging" // Transformer specifies API form Message transformer. type Transformer interface { - // Transform Mainflux message to any other format. + // Transform Magistrala message to any other format. 
Transform(msg *messaging.Message) (interface{}, error) } diff --git a/vendor/github.com/mainflux/mainflux/pkg/uuid/README.md b/vendor/github.com/absmach/magistrala/pkg/uuid/README.md similarity index 100% rename from vendor/github.com/mainflux/mainflux/pkg/uuid/README.md rename to vendor/github.com/absmach/magistrala/pkg/uuid/README.md diff --git a/vendor/github.com/mainflux/mainflux/pkg/uuid/doc.go b/vendor/github.com/absmach/magistrala/pkg/uuid/doc.go similarity index 77% rename from vendor/github.com/mainflux/mainflux/pkg/uuid/doc.go rename to vendor/github.com/absmach/magistrala/pkg/uuid/doc.go index 20306432..8cabc606 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/uuid/doc.go +++ b/vendor/github.com/absmach/magistrala/pkg/uuid/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Package uuid contains UUID generator. diff --git a/vendor/github.com/mainflux/mainflux/pkg/uuid/mock.go b/vendor/github.com/absmach/magistrala/pkg/uuid/mock.go similarity index 78% rename from vendor/github.com/mainflux/mainflux/pkg/uuid/mock.go rename to vendor/github.com/absmach/magistrala/pkg/uuid/mock.go index 37c119d1..6bcb1269 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/uuid/mock.go +++ b/vendor/github.com/absmach/magistrala/pkg/uuid/mock.go @@ -1,4 +1,4 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 package uuid @@ -7,13 +7,13 @@ import ( "fmt" "sync" - "github.com/mainflux/mainflux" + "github.com/absmach/magistrala" ) // Prefix represents the prefix used to generate UUID mocks. const Prefix = "123e4567-e89b-12d3-a456-" -var _ mainflux.IDProvider = (*uuidProviderMock)(nil) +var _ magistrala.IDProvider = (*uuidProviderMock)(nil) type uuidProviderMock struct { mu sync.Mutex @@ -30,6 +30,6 @@ func (up *uuidProviderMock) ID() (string, error) { // NewMock creates "mirror" uuid provider, i.e. generated // token will hold value provided by the caller. 
-func NewMock() mainflux.IDProvider { +func NewMock() magistrala.IDProvider { return &uuidProviderMock{} } diff --git a/vendor/github.com/mainflux/mainflux/pkg/uuid/uuid.go b/vendor/github.com/absmach/magistrala/pkg/uuid/uuid.go similarity index 73% rename from vendor/github.com/mainflux/mainflux/pkg/uuid/uuid.go rename to vendor/github.com/absmach/magistrala/pkg/uuid/uuid.go index 0ae93b03..9a78d870 100644 --- a/vendor/github.com/mainflux/mainflux/pkg/uuid/uuid.go +++ b/vendor/github.com/absmach/magistrala/pkg/uuid/uuid.go @@ -1,24 +1,24 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 // Package uuid provides a UUID identity provider. package uuid import ( + "github.com/absmach/magistrala" + "github.com/absmach/magistrala/pkg/errors" "github.com/gofrs/uuid" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" ) // ErrGeneratingID indicates error in generating UUID. var ErrGeneratingID = errors.New("failed to generate uuid") -var _ mainflux.IDProvider = (*uuidProvider)(nil) +var _ magistrala.IDProvider = (*uuidProvider)(nil) type uuidProvider struct{} // New instantiates a UUID provider. -func New() mainflux.IDProvider { +func New() magistrala.IDProvider { return &uuidProvider{} } diff --git a/vendor/github.com/mainflux/mainflux/uuid.go b/vendor/github.com/absmach/magistrala/uuid.go similarity index 80% rename from vendor/github.com/mainflux/mainflux/uuid.go rename to vendor/github.com/absmach/magistrala/uuid.go index 08fb802c..11c00a40 100644 --- a/vendor/github.com/mainflux/mainflux/uuid.go +++ b/vendor/github.com/absmach/magistrala/uuid.go @@ -1,7 +1,7 @@ -// Copyright (c) Mainflux +// Copyright (c) Magistrala // SPDX-License-Identifier: Apache-2.0 -package mainflux +package magistrala // IDProvider specifies an API for generating unique identifiers. 
type IDProvider interface { diff --git a/vendor/github.com/creack/pty/.gitignore b/vendor/github.com/creack/pty/.gitignore deleted file mode 100644 index 1f0a99f2..00000000 --- a/vendor/github.com/creack/pty/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -[568].out -_go* -_test* -_obj diff --git a/vendor/github.com/creack/pty/Dockerfile.golang b/vendor/github.com/creack/pty/Dockerfile.golang deleted file mode 100644 index 2ee82a3a..00000000 --- a/vendor/github.com/creack/pty/Dockerfile.golang +++ /dev/null @@ -1,17 +0,0 @@ -ARG GOVERSION=1.14 -FROM golang:${GOVERSION} - -# Set base env. -ARG GOOS=linux -ARG GOARCH=amd64 -ENV GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 GOFLAGS='-v -ldflags=-s -ldflags=-w' - -# Pre compile the stdlib for 386/arm (32bits). -RUN go build -a std - -# Add the code to the image. -WORKDIR pty -ADD . . - -# Build the lib. -RUN go build diff --git a/vendor/github.com/creack/pty/Dockerfile.riscv b/vendor/github.com/creack/pty/Dockerfile.riscv deleted file mode 100644 index 7a30c94d..00000000 --- a/vendor/github.com/creack/pty/Dockerfile.riscv +++ /dev/null @@ -1,23 +0,0 @@ -# NOTE: Using 1.13 as a base to build the RISCV compiler, the resulting version is based on go1.6. -FROM golang:1.13 - -# Clone and complie a riscv compatible version of the go compiler. -RUN git clone https://review.gerrithub.io/riscv/riscv-go /riscv-go -# riscvdev branch HEAD as of 2019-06-29. -RUN cd /riscv-go && git checkout 04885fddd096d09d4450726064d06dd107e374bf -ENV PATH=/riscv-go/misc/riscv:/riscv-go/bin:$PATH -RUN cd /riscv-go/src && GOROOT_BOOTSTRAP=$(go env GOROOT) ./make.bash -ENV GOROOT=/riscv-go - -# Set the base env. -ENV GOOS=linux GOARCH=riscv CGO_ENABLED=0 GOFLAGS='-v -ldflags=-s -ldflags=-w' - -# Pre compile the stdlib. -RUN go build -a std - -# Add the code to the image. -WORKDIR pty -ADD . . - -# Build the lib. 
-RUN go build diff --git a/vendor/github.com/creack/pty/LICENSE b/vendor/github.com/creack/pty/LICENSE deleted file mode 100644 index 6b7558b6..00000000 --- a/vendor/github.com/creack/pty/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2011 Keith Rarick - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, -sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall -be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/creack/pty/README.md b/vendor/github.com/creack/pty/README.md deleted file mode 100644 index a4fe7670..00000000 --- a/vendor/github.com/creack/pty/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# pty - -Pty is a Go package for using unix pseudo-terminals. - -## Install - -```sh -go get github.com/creack/pty -``` - -## Examples - -Note that those examples are for demonstration purpose only, to showcase how to use the library. They are not meant to be used in any kind of production environment. 
- -### Command - -```go -package main - -import ( - "io" - "os" - "os/exec" - - "github.com/creack/pty" -) - -func main() { - c := exec.Command("grep", "--color=auto", "bar") - f, err := pty.Start(c) - if err != nil { - panic(err) - } - - go func() { - f.Write([]byte("foo\n")) - f.Write([]byte("bar\n")) - f.Write([]byte("baz\n")) - f.Write([]byte{4}) // EOT - }() - io.Copy(os.Stdout, f) -} -``` - -### Shell - -```go -package main - -import ( - "io" - "log" - "os" - "os/exec" - "os/signal" - "syscall" - - "github.com/creack/pty" - "golang.org/x/term" -) - -func test() error { - // Create arbitrary command. - c := exec.Command("bash") - - // Start the command with a pty. - ptmx, err := pty.Start(c) - if err != nil { - return err - } - // Make sure to close the pty at the end. - defer func() { _ = ptmx.Close() }() // Best effort. - - // Handle pty size. - ch := make(chan os.Signal, 1) - signal.Notify(ch, syscall.SIGWINCH) - go func() { - for range ch { - if err := pty.InheritSize(os.Stdin, ptmx); err != nil { - log.Printf("error resizing pty: %s", err) - } - } - }() - ch <- syscall.SIGWINCH // Initial resize. - defer func() { signal.Stop(ch); close(ch) }() // Cleanup signals when done. - - // Set stdin in raw mode. - oldState, err := term.MakeRaw(int(os.Stdin.Fd())) - if err != nil { - panic(err) - } - defer func() { _ = term.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort. - - // Copy stdin to the pty and the pty to stdout. - // NOTE: The goroutine will keep reading until the next keystroke before returning. 
- go func() { _, _ = io.Copy(ptmx, os.Stdin) }() - _, _ = io.Copy(os.Stdout, ptmx) - - return nil -} - -func main() { - if err := test(); err != nil { - log.Fatal(err) - } -} -``` diff --git a/vendor/github.com/creack/pty/asm_solaris_amd64.s b/vendor/github.com/creack/pty/asm_solaris_amd64.s deleted file mode 100644 index 7fbef8ee..00000000 --- a/vendor/github.com/creack/pty/asm_solaris_amd64.s +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -//+build gc - -#include "textflag.h" - -// -// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go -// - -TEXT ·sysvicall6(SB),NOSPLIT,$0-88 - JMP syscall·sysvicall6(SB) - -TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 - JMP syscall·rawSysvicall6(SB) diff --git a/vendor/github.com/creack/pty/doc.go b/vendor/github.com/creack/pty/doc.go deleted file mode 100644 index 3c8b3244..00000000 --- a/vendor/github.com/creack/pty/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package pty provides functions for working with Unix terminals. -package pty - -import ( - "errors" - "os" -) - -// ErrUnsupported is returned if a function is not -// available on the current platform. -var ErrUnsupported = errors.New("unsupported") - -// Open a pty and its corresponding tty. 
-func Open() (pty, tty *os.File, err error) { - return open() -} diff --git a/vendor/github.com/creack/pty/ioctl.go b/vendor/github.com/creack/pty/ioctl.go deleted file mode 100644 index 3cabedd9..00000000 --- a/vendor/github.com/creack/pty/ioctl.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !windows && !solaris && !aix -// +build !windows,!solaris,!aix - -package pty - -import "syscall" - -const ( - TIOCGWINSZ = syscall.TIOCGWINSZ - TIOCSWINSZ = syscall.TIOCSWINSZ -) - -func ioctl(fd, cmd, ptr uintptr) error { - _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr) - if e != 0 { - return e - } - return nil -} diff --git a/vendor/github.com/creack/pty/ioctl_bsd.go b/vendor/github.com/creack/pty/ioctl_bsd.go deleted file mode 100644 index db3bf845..00000000 --- a/vendor/github.com/creack/pty/ioctl_bsd.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd - -package pty - -// from -const ( - _IOC_VOID uintptr = 0x20000000 - _IOC_OUT uintptr = 0x40000000 - _IOC_IN uintptr = 0x80000000 - _IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN - _IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN - - _IOC_PARAM_SHIFT = 13 - _IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1 -) - -func _IOC_PARM_LEN(ioctl uintptr) uintptr { - return (ioctl >> 16) & _IOC_PARAM_MASK -} - -func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr { - return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num -} - -func _IO(group byte, ioctl_num uintptr) uintptr { - return _IOC(_IOC_VOID, group, ioctl_num, 0) -} - -func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { - return _IOC(_IOC_OUT, group, ioctl_num, param_len) -} - -func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr { - return _IOC(_IOC_IN, group, ioctl_num, param_len) -} - -func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { - return _IOC(_IOC_IN_OUT, group, 
ioctl_num, param_len) -} diff --git a/vendor/github.com/creack/pty/ioctl_solaris.go b/vendor/github.com/creack/pty/ioctl_solaris.go deleted file mode 100644 index bff22dad..00000000 --- a/vendor/github.com/creack/pty/ioctl_solaris.go +++ /dev/null @@ -1,48 +0,0 @@ -//go:build solaris -// +build solaris - -package pty - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" -//go:linkname procioctl libc_ioctl -var procioctl uintptr - -const ( - // see /usr/include/sys/stropts.h - I_PUSH = uintptr((int32('S')<<8 | 002)) - I_STR = uintptr((int32('S')<<8 | 010)) - I_FIND = uintptr((int32('S')<<8 | 013)) - - // see /usr/include/sys/ptms.h - ISPTM = (int32('P') << 8) | 1 - UNLKPT = (int32('P') << 8) | 2 - PTSSTTY = (int32('P') << 8) | 3 - ZONEPT = (int32('P') << 8) | 4 - OWNERPT = (int32('P') << 8) | 5 - - // see /usr/include/sys/termios.h - TIOCSWINSZ = (uint32('T') << 8) | 103 - TIOCGWINSZ = (uint32('T') << 8) | 104 -) - -type strioctl struct { - icCmd int32 - icTimeout int32 - icLen int32 - icDP unsafe.Pointer -} - -// Defined in asm_solaris_amd64.s. 
-func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func ioctl(fd, cmd, ptr uintptr) error { - if _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, fd, cmd, ptr, 0, 0, 0); errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/creack/pty/ioctl_unsupported.go b/vendor/github.com/creack/pty/ioctl_unsupported.go deleted file mode 100644 index 2449a27e..00000000 --- a/vendor/github.com/creack/pty/ioctl_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build aix -// +build aix - -package pty - -const ( - TIOCGWINSZ = 0 - TIOCSWINSZ = 0 -) - -func ioctl(fd, cmd, ptr uintptr) error { - return ErrUnsupported -} diff --git a/vendor/github.com/creack/pty/mktypes.bash b/vendor/github.com/creack/pty/mktypes.bash deleted file mode 100644 index 7f71bda6..00000000 --- a/vendor/github.com/creack/pty/mktypes.bash +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -GOOSARCH="${GOOS}_${GOARCH}" -case "$GOOSARCH" in -_* | *_ | _) - echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 - exit 1 - ;; -esac - -GODEFS="go tool cgo -godefs" - -$GODEFS types.go |gofmt > ztypes_$GOARCH.go - -case $GOOS in -freebsd|dragonfly|netbsd|openbsd) - $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go - ;; -esac diff --git a/vendor/github.com/creack/pty/pty_darwin.go b/vendor/github.com/creack/pty/pty_darwin.go deleted file mode 100644 index 9bdd71d0..00000000 --- a/vendor/github.com/creack/pty/pty_darwin.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build darwin -// +build darwin - -package pty - -import ( - "errors" - "os" - "syscall" - "unsafe" -) - -func open() (pty, tty *os.File, err error) { - pFD, err := syscall.Open("/dev/ptmx", syscall.O_RDWR|syscall.O_CLOEXEC, 0) - if err != nil { - return nil, nil, err - } - p := os.NewFile(uintptr(pFD), "/dev/ptmx") - // In case of error after this point, make sure we close the ptmx fd. - defer func() { - if err != nil { - _ = p.Close() // Best effort. 
- } - }() - - sname, err := ptsname(p) - if err != nil { - return nil, nil, err - } - - if err := grantpt(p); err != nil { - return nil, nil, err - } - - if err := unlockpt(p); err != nil { - return nil, nil, err - } - - t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) - if err != nil { - return nil, nil, err - } - return p, t, nil -} - -func ptsname(f *os.File) (string, error) { - n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME)) - - err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0]))) - if err != nil { - return "", err - } - - for i, c := range n { - if c == 0 { - return string(n[:i]), nil - } - } - return "", errors.New("TIOCPTYGNAME string not NUL-terminated") -} - -func grantpt(f *os.File) error { - return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0) -} - -func unlockpt(f *os.File) error { - return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0) -} diff --git a/vendor/github.com/creack/pty/pty_dragonfly.go b/vendor/github.com/creack/pty/pty_dragonfly.go deleted file mode 100644 index aa916aad..00000000 --- a/vendor/github.com/creack/pty/pty_dragonfly.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build dragonfly -// +build dragonfly - -package pty - -import ( - "errors" - "os" - "strings" - "syscall" - "unsafe" -) - -// same code as pty_darwin.go -func open() (pty, tty *os.File, err error) { - p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) - if err != nil { - return nil, nil, err - } - // In case of error after this point, make sure we close the ptmx fd. - defer func() { - if err != nil { - _ = p.Close() // Best effort. 
- } - }() - - sname, err := ptsname(p) - if err != nil { - return nil, nil, err - } - - if err := grantpt(p); err != nil { - return nil, nil, err - } - - if err := unlockpt(p); err != nil { - return nil, nil, err - } - - t, err := os.OpenFile(sname, os.O_RDWR, 0) - if err != nil { - return nil, nil, err - } - return p, t, nil -} - -func grantpt(f *os.File) error { - _, err := isptmaster(f.Fd()) - return err -} - -func unlockpt(f *os.File) error { - _, err := isptmaster(f.Fd()) - return err -} - -func isptmaster(fd uintptr) (bool, error) { - err := ioctl(fd, syscall.TIOCISPTMASTER, 0) - return err == nil, err -} - -var ( - emptyFiodgnameArg fiodgnameArg - ioctl_FIODNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) -) - -func ptsname(f *os.File) (string, error) { - name := make([]byte, _C_SPECNAMELEN) - fa := fiodgnameArg{Name: (*byte)(unsafe.Pointer(&name[0])), Len: _C_SPECNAMELEN, Pad_cgo_0: [4]byte{0, 0, 0, 0}} - - err := ioctl(f.Fd(), ioctl_FIODNAME, uintptr(unsafe.Pointer(&fa))) - if err != nil { - return "", err - } - - for i, c := range name { - if c == 0 { - s := "/dev/" + string(name[:i]) - return strings.Replace(s, "ptm", "pts", -1), nil - } - } - return "", errors.New("TIOCPTYGNAME string not NUL-terminated") -} diff --git a/vendor/github.com/creack/pty/pty_freebsd.go b/vendor/github.com/creack/pty/pty_freebsd.go deleted file mode 100644 index bcd3b6f9..00000000 --- a/vendor/github.com/creack/pty/pty_freebsd.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build freebsd -// +build freebsd - -package pty - -import ( - "errors" - "os" - "syscall" - "unsafe" -) - -func posixOpenpt(oflag int) (fd int, err error) { - r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = e1 - } - return fd, err -} - -func open() (pty, tty *os.File, err error) { - fd, err := posixOpenpt(syscall.O_RDWR | syscall.O_CLOEXEC) - if err != nil { - return nil, nil, err - } - p := os.NewFile(uintptr(fd), "/dev/pts") - // In case 
of error after this point, make sure we close the pts fd. - defer func() { - if err != nil { - _ = p.Close() // Best effort. - } - }() - - sname, err := ptsname(p) - if err != nil { - return nil, nil, err - } - - t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0) - if err != nil { - return nil, nil, err - } - return p, t, nil -} - -func isptmaster(fd uintptr) (bool, error) { - err := ioctl(fd, syscall.TIOCPTMASTER, 0) - return err == nil, err -} - -var ( - emptyFiodgnameArg fiodgnameArg - ioctlFIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) -) - -func ptsname(f *os.File) (string, error) { - master, err := isptmaster(f.Fd()) - if err != nil { - return "", err - } - if !master { - return "", syscall.EINVAL - } - - const n = _C_SPECNAMELEN + 1 - var ( - buf = make([]byte, n) - arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))} - ) - if err := ioctl(f.Fd(), ioctlFIODGNAME, uintptr(unsafe.Pointer(&arg))); err != nil { - return "", err - } - - for i, c := range buf { - if c == 0 { - return string(buf[:i]), nil - } - } - return "", errors.New("FIODGNAME string not NUL-terminated") -} diff --git a/vendor/github.com/creack/pty/pty_linux.go b/vendor/github.com/creack/pty/pty_linux.go deleted file mode 100644 index a3b368f5..00000000 --- a/vendor/github.com/creack/pty/pty_linux.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build linux -// +build linux - -package pty - -import ( - "os" - "strconv" - "syscall" - "unsafe" -) - -func open() (pty, tty *os.File, err error) { - p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) - if err != nil { - return nil, nil, err - } - // In case of error after this point, make sure we close the ptmx fd. - defer func() { - if err != nil { - _ = p.Close() // Best effort. 
- } - }() - - sname, err := ptsname(p) - if err != nil { - return nil, nil, err - } - - if err := unlockpt(p); err != nil { - return nil, nil, err - } - - t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) //nolint:gosec // Expected Open from a variable. - if err != nil { - return nil, nil, err - } - return p, t, nil -} - -func ptsname(f *os.File) (string, error) { - var n _C_uint - err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))) //nolint:gosec // Expected unsafe pointer for Syscall call. - if err != nil { - return "", err - } - return "/dev/pts/" + strconv.Itoa(int(n)), nil -} - -func unlockpt(f *os.File) error { - var u _C_int - // use TIOCSPTLCK with a pointer to zero to clear the lock - return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) //nolint:gosec // Expected unsafe pointer for Syscall call. -} diff --git a/vendor/github.com/creack/pty/pty_netbsd.go b/vendor/github.com/creack/pty/pty_netbsd.go deleted file mode 100644 index 2b20d944..00000000 --- a/vendor/github.com/creack/pty/pty_netbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build netbsd -// +build netbsd - -package pty - -import ( - "errors" - "os" - "syscall" - "unsafe" -) - -func open() (pty, tty *os.File, err error) { - p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) - if err != nil { - return nil, nil, err - } - // In case of error after this point, make sure we close the ptmx fd. - defer func() { - if err != nil { - _ = p.Close() // Best effort. - } - }() - - sname, err := ptsname(p) - if err != nil { - return nil, nil, err - } - - if err := grantpt(p); err != nil { - return nil, nil, err - } - - // In NetBSD unlockpt() does nothing, so it isn't called here. 
- - t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) - if err != nil { - return nil, nil, err - } - return p, t, nil -} - -func ptsname(f *os.File) (string, error) { - /* - * from ptsname(3): The ptsname() function is equivalent to: - * struct ptmget pm; - * ioctl(fd, TIOCPTSNAME, &pm) == -1 ? NULL : pm.sn; - */ - var ptm ptmget - if err := ioctl(f.Fd(), uintptr(ioctl_TIOCPTSNAME), uintptr(unsafe.Pointer(&ptm))); err != nil { - return "", err - } - name := make([]byte, len(ptm.Sn)) - for i, c := range ptm.Sn { - name[i] = byte(c) - if c == 0 { - return string(name[:i]), nil - } - } - return "", errors.New("TIOCPTSNAME string not NUL-terminated") -} - -func grantpt(f *os.File) error { - /* - * from grantpt(3): Calling grantpt() is equivalent to: - * ioctl(fd, TIOCGRANTPT, 0); - */ - return ioctl(f.Fd(), uintptr(ioctl_TIOCGRANTPT), 0) -} diff --git a/vendor/github.com/creack/pty/pty_openbsd.go b/vendor/github.com/creack/pty/pty_openbsd.go deleted file mode 100644 index 031367a8..00000000 --- a/vendor/github.com/creack/pty/pty_openbsd.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build openbsd -// +build openbsd - -package pty - -import ( - "os" - "syscall" - "unsafe" -) - -func open() (pty, tty *os.File, err error) { - /* - * from ptm(4): - * The PTMGET command allocates a free pseudo terminal, changes its - * ownership to the caller, revokes the access privileges for all previous - * users, opens the file descriptors for the pty and tty devices and - * returns them to the caller in struct ptmget. 
- */ - - p, err := os.OpenFile("/dev/ptm", os.O_RDWR|syscall.O_CLOEXEC, 0) - if err != nil { - return nil, nil, err - } - defer p.Close() - - var ptm ptmget - if err := ioctl(p.Fd(), uintptr(ioctl_PTMGET), uintptr(unsafe.Pointer(&ptm))); err != nil { - return nil, nil, err - } - - pty = os.NewFile(uintptr(ptm.Cfd), "/dev/ptm") - tty = os.NewFile(uintptr(ptm.Sfd), "/dev/ptm") - - return pty, tty, nil -} diff --git a/vendor/github.com/creack/pty/pty_solaris.go b/vendor/github.com/creack/pty/pty_solaris.go deleted file mode 100644 index 37f933e6..00000000 --- a/vendor/github.com/creack/pty/pty_solaris.go +++ /dev/null @@ -1,152 +0,0 @@ -//go:build solaris -// +build solaris - -package pty - -/* based on: -http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/pt.c -*/ - -import ( - "errors" - "os" - "strconv" - "syscall" - "unsafe" -) - -func open() (pty, tty *os.File, err error) { - ptmxfd, err := syscall.Open("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY, 0) - if err != nil { - return nil, nil, err - } - p := os.NewFile(uintptr(ptmxfd), "/dev/ptmx") - // In case of error after this point, make sure we close the ptmx fd. - defer func() { - if err != nil { - _ = p.Close() // Best effort. - } - }() - - sname, err := ptsname(p) - if err != nil { - return nil, nil, err - } - - if err := grantpt(p); err != nil { - return nil, nil, err - } - - if err := unlockpt(p); err != nil { - return nil, nil, err - } - - ptsfd, err := syscall.Open(sname, os.O_RDWR|syscall.O_NOCTTY, 0) - if err != nil { - return nil, nil, err - } - t := os.NewFile(uintptr(ptsfd), sname) - - // In case of error after this point, make sure we close the pts fd. - defer func() { - if err != nil { - _ = t.Close() // Best effort. 
- } - }() - - // pushing terminal driver STREAMS modules as per pts(7) - for _, mod := range []string{"ptem", "ldterm", "ttcompat"} { - if err := streamsPush(t, mod); err != nil { - return nil, nil, err - } - } - - return p, t, nil -} - -func ptsname(f *os.File) (string, error) { - dev, err := ptsdev(f.Fd()) - if err != nil { - return "", err - } - fn := "/dev/pts/" + strconv.FormatInt(int64(dev), 10) - - if err := syscall.Access(fn, 0); err != nil { - return "", err - } - return fn, nil -} - -func unlockpt(f *os.File) error { - istr := strioctl{ - icCmd: UNLKPT, - icTimeout: 0, - icLen: 0, - icDP: nil, - } - return ioctl(f.Fd(), I_STR, uintptr(unsafe.Pointer(&istr))) -} - -func minor(x uint64) uint64 { return x & 0377 } - -func ptsdev(fd uintptr) (uint64, error) { - istr := strioctl{ - icCmd: ISPTM, - icTimeout: 0, - icLen: 0, - icDP: nil, - } - - if err := ioctl(fd, I_STR, uintptr(unsafe.Pointer(&istr))); err != nil { - return 0, err - } - var status syscall.Stat_t - if err := syscall.Fstat(int(fd), &status); err != nil { - return 0, err - } - return uint64(minor(status.Rdev)), nil -} - -type ptOwn struct { - rUID int32 - rGID int32 -} - -func grantpt(f *os.File) error { - if _, err := ptsdev(f.Fd()); err != nil { - return err - } - pto := ptOwn{ - rUID: int32(os.Getuid()), - // XXX should first attempt to get gid of DEFAULT_TTY_GROUP="tty" - rGID: int32(os.Getgid()), - } - istr := strioctl{ - icCmd: OWNERPT, - icTimeout: 0, - icLen: int32(unsafe.Sizeof(strioctl{})), - icDP: unsafe.Pointer(&pto), - } - if err := ioctl(f.Fd(), I_STR, uintptr(unsafe.Pointer(&istr))); err != nil { - return errors.New("access denied") - } - return nil -} - -// streamsPush pushes STREAMS modules if not already done so. -func streamsPush(f *os.File, mod string) error { - buf := []byte(mod) - - // XXX I_FIND is not returning an error when the module - // is already pushed even though truss reports a return - // value of 1. A bug in the Go Solaris syscall interface? 
- // XXX without this we are at risk of the issue - // https://www.illumos.org/issues/9042 - // but since we are not using libc or XPG4.2, we should not be - // double-pushing modules - - if err := ioctl(f.Fd(), I_FIND, uintptr(unsafe.Pointer(&buf[0]))); err != nil { - return nil - } - return ioctl(f.Fd(), I_PUSH, uintptr(unsafe.Pointer(&buf[0]))) -} diff --git a/vendor/github.com/creack/pty/pty_unsupported.go b/vendor/github.com/creack/pty/pty_unsupported.go deleted file mode 100644 index c771020f..00000000 --- a/vendor/github.com/creack/pty/pty_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !linux && !darwin && !freebsd && !dragonfly && !netbsd && !openbsd && !solaris -// +build !linux,!darwin,!freebsd,!dragonfly,!netbsd,!openbsd,!solaris - -package pty - -import ( - "os" -) - -func open() (pty, tty *os.File, err error) { - return nil, nil, ErrUnsupported -} diff --git a/vendor/github.com/creack/pty/run.go b/vendor/github.com/creack/pty/run.go deleted file mode 100644 index 47553662..00000000 --- a/vendor/github.com/creack/pty/run.go +++ /dev/null @@ -1,57 +0,0 @@ -package pty - -import ( - "os" - "os/exec" - "syscall" -) - -// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, -// and c.Stderr, calls c.Start, and returns the File of the tty's -// corresponding pty. -// -// Starts the process in a new session and sets the controlling terminal. -func Start(cmd *exec.Cmd) (*os.File, error) { - return StartWithSize(cmd, nil) -} - -// StartWithAttrs assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, -// and c.Stderr, calls c.Start, and returns the File of the tty's -// corresponding pty. -// -// This will resize the pty to the specified size before starting the command if a size is provided. -// The `attrs` parameter overrides the one set in c.SysProcAttr. -// -// This should generally not be needed. Used in some edge cases where it is needed to create a pty -// without a controlling terminal. 
-func StartWithAttrs(c *exec.Cmd, sz *Winsize, attrs *syscall.SysProcAttr) (*os.File, error) { - pty, tty, err := Open() - if err != nil { - return nil, err - } - defer func() { _ = tty.Close() }() // Best effort. - - if sz != nil { - if err := Setsize(pty, sz); err != nil { - _ = pty.Close() // Best effort. - return nil, err - } - } - if c.Stdout == nil { - c.Stdout = tty - } - if c.Stderr == nil { - c.Stderr = tty - } - if c.Stdin == nil { - c.Stdin = tty - } - - c.SysProcAttr = attrs - - if err := c.Start(); err != nil { - _ = pty.Close() // Best effort. - return nil, err - } - return pty, err -} diff --git a/vendor/github.com/creack/pty/start.go b/vendor/github.com/creack/pty/start.go deleted file mode 100644 index 9b51635f..00000000 --- a/vendor/github.com/creack/pty/start.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !windows -// +build !windows - -package pty - -import ( - "os" - "os/exec" - "syscall" -) - -// StartWithSize assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, -// and c.Stderr, calls c.Start, and returns the File of the tty's -// corresponding pty. -// -// This will resize the pty to the specified size before starting the command. -// Starts the process in a new session and sets the controlling terminal. 
-func StartWithSize(cmd *exec.Cmd, ws *Winsize) (*os.File, error) { - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.SysProcAttr.Setsid = true - cmd.SysProcAttr.Setctty = true - return StartWithAttrs(cmd, ws, cmd.SysProcAttr) -} diff --git a/vendor/github.com/creack/pty/start_windows.go b/vendor/github.com/creack/pty/start_windows.go deleted file mode 100644 index 7e9530ba..00000000 --- a/vendor/github.com/creack/pty/start_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build windows -// +build windows - -package pty - -import ( - "os" - "os/exec" -) - -// StartWithSize assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, -// and c.Stderr, calls c.Start, and returns the File of the tty's -// corresponding pty. -// -// This will resize the pty to the specified size before starting the command. -// Starts the process in a new session and sets the controlling terminal. -func StartWithSize(cmd *exec.Cmd, ws *Winsize) (*os.File, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/creack/pty/test_crosscompile.sh b/vendor/github.com/creack/pty/test_crosscompile.sh deleted file mode 100644 index 47e8b106..00000000 --- a/vendor/github.com/creack/pty/test_crosscompile.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env sh - -# Test script checking that all expected os/arch compile properly. -# Does not actually test the logic, just the compilation so we make sure we don't break code depending on the lib. - -echo2() { - echo $@ >&2 -} - -trap end 0 -end() { - [ "$?" = 0 ] && echo2 "Pass." || (echo2 "Fail."; exit 1) -} - -cross() { - os=$1 - shift - echo2 "Build for $os." 
- for arch in $@; do - echo2 " - $os/$arch" - GOOS=$os GOARCH=$arch go build - done - echo2 -} - -set -e - -cross linux amd64 386 arm arm64 ppc64 ppc64le s390x mips mipsle mips64 mips64le -cross darwin amd64 arm64 -cross freebsd amd64 386 arm arm64 -cross netbsd amd64 386 arm arm64 -cross openbsd amd64 386 arm arm64 -cross dragonfly amd64 -cross solaris amd64 - -# Not expected to work but should still compile. -cross windows amd64 386 arm - -# TODO: Fix compilation error on openbsd/arm. -# TODO: Merge the solaris PR. - -# Some os/arch require a different compiler. Run in docker. -if ! hash docker; then - # If docker is not present, stop here. - return -fi - -echo2 "Build for linux." -echo2 " - linux/riscv" -docker build -t creack-pty-test -f Dockerfile.riscv . - -# Golang dropped support for darwin 32bits since go1.15. Make sure the lib still compile with go1.14 on those archs. -echo2 "Build for darwin (32bits)." -echo2 " - darwin/386" -docker build -t creack-pty-test -f Dockerfile.golang --build-arg=GOVERSION=1.14 --build-arg=GOOS=darwin --build-arg=GOARCH=386 . -echo2 " - darwin/arm" -docker build -t creack-pty-test -f Dockerfile.golang --build-arg=GOVERSION=1.14 --build-arg=GOOS=darwin --build-arg=GOARCH=arm . - -# Run a single test for an old go version. Would be best with go1.0, but not available on Dockerhub. -# Using 1.6 as it is the base version for the RISCV compiler. -# Would also be better to run all the tests, not just one, need to refactor this file to allow for specifc archs per version. -echo2 "Build for linux - go1.6." -echo2 " - linux/amd64" -docker build -t creack-pty-test -f Dockerfile.golang --build-arg=GOVERSION=1.6 --build-arg=GOOS=linux --build-arg=GOARCH=amd64 . 
diff --git a/vendor/github.com/creack/pty/winsize.go b/vendor/github.com/creack/pty/winsize.go deleted file mode 100644 index 57323f40..00000000 --- a/vendor/github.com/creack/pty/winsize.go +++ /dev/null @@ -1,27 +0,0 @@ -package pty - -import "os" - -// InheritSize applies the terminal size of pty to tty. This should be run -// in a signal handler for syscall.SIGWINCH to automatically resize the tty when -// the pty receives a window size change notification. -func InheritSize(pty, tty *os.File) error { - size, err := GetsizeFull(pty) - if err != nil { - return err - } - if err := Setsize(tty, size); err != nil { - return err - } - return nil -} - -// Getsize returns the number of rows (lines) and cols (positions -// in each line) in terminal t. -func Getsize(t *os.File) (rows, cols int, err error) { - ws, err := GetsizeFull(t) - if err != nil { - return 0, 0, err - } - return int(ws.Rows), int(ws.Cols), nil -} diff --git a/vendor/github.com/creack/pty/winsize_unix.go b/vendor/github.com/creack/pty/winsize_unix.go deleted file mode 100644 index 5d99c3dd..00000000 --- a/vendor/github.com/creack/pty/winsize_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows -// +build !windows - -package pty - -import ( - "os" - "syscall" - "unsafe" -) - -// Winsize describes the terminal size. -type Winsize struct { - Rows uint16 // ws_row: Number of rows (in cells) - Cols uint16 // ws_col: Number of columns (in cells) - X uint16 // ws_xpixel: Width in pixels - Y uint16 // ws_ypixel: Height in pixels -} - -// Setsize resizes t to s. -func Setsize(t *os.File, ws *Winsize) error { - //nolint:gosec // Expected unsafe pointer for Syscall call. - return ioctl(t.Fd(), syscall.TIOCSWINSZ, uintptr(unsafe.Pointer(ws))) -} - -// GetsizeFull returns the full terminal size description. -func GetsizeFull(t *os.File) (size *Winsize, err error) { - var ws Winsize - - //nolint:gosec // Expected unsafe pointer for Syscall call. 
- if err := ioctl(t.Fd(), syscall.TIOCGWINSZ, uintptr(unsafe.Pointer(&ws))); err != nil { - return nil, err - } - return &ws, nil -} diff --git a/vendor/github.com/creack/pty/winsize_unsupported.go b/vendor/github.com/creack/pty/winsize_unsupported.go deleted file mode 100644 index 0d210993..00000000 --- a/vendor/github.com/creack/pty/winsize_unsupported.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build windows -// +build windows - -package pty - -import ( - "os" -) - -// Winsize is a dummy struct to enable compilation on unsupported platforms. -type Winsize struct { - Rows, Cols, X, Y uint16 -} - -// Setsize resizes t to s. -func Setsize(*os.File, *Winsize) error { - return ErrUnsupported -} - -// GetsizeFull returns the full terminal size description. -func GetsizeFull(*os.File) (*Winsize, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/creack/pty/ztypes_386.go b/vendor/github.com/creack/pty/ztypes_386.go deleted file mode 100644 index d126f4aa..00000000 --- a/vendor/github.com/creack/pty/ztypes_386.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build 386 -// +build 386 - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_amd64.go b/vendor/github.com/creack/pty/ztypes_amd64.go deleted file mode 100644 index 6c4a7677..00000000 --- a/vendor/github.com/creack/pty/ztypes_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build amd64 -// +build amd64 - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_arm.go b/vendor/github.com/creack/pty/ztypes_arm.go deleted file mode 100644 index de6fe160..00000000 --- a/vendor/github.com/creack/pty/ztypes_arm.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build arm -// +build arm - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 
- _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_arm64.go b/vendor/github.com/creack/pty/ztypes_arm64.go deleted file mode 100644 index c4f315ca..00000000 --- a/vendor/github.com/creack/pty/ztypes_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build arm64 -// +build arm64 - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go b/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go deleted file mode 100644 index 183c4214..00000000 --- a/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build amd64 && dragonfly -// +build amd64,dragonfly - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_dragonfly.go - -package pty - -const ( - _C_SPECNAMELEN = 0x3f -) - -type fiodgnameArg struct { - Name *byte - Len uint32 - Pad_cgo_0 [4]byte -} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_386.go b/vendor/github.com/creack/pty/ztypes_freebsd_386.go deleted file mode 100644 index d80dbf71..00000000 --- a/vendor/github.com/creack/pty/ztypes_freebsd_386.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build 386 && freebsd -// +build 386,freebsd - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package pty - -const ( - _C_SPECNAMELEN = 0x3f -) - -type fiodgnameArg struct { - Len int32 - Buf *byte -} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go b/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go deleted file mode 100644 index bfab4e45..00000000 --- a/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build amd64 && freebsd -// +build amd64,freebsd - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package pty - -const ( - _C_SPECNAMELEN = 0x3f -) - -type fiodgnameArg struct { - Len int32 - Pad_cgo_0 [4]byte - Buf *byte -} diff --git 
a/vendor/github.com/creack/pty/ztypes_freebsd_arm.go b/vendor/github.com/creack/pty/ztypes_freebsd_arm.go deleted file mode 100644 index 3a8aeae3..00000000 --- a/vendor/github.com/creack/pty/ztypes_freebsd_arm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build arm && freebsd -// +build arm,freebsd - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package pty - -const ( - _C_SPECNAMELEN = 0x3f -) - -type fiodgnameArg struct { - Len int32 - Buf *byte -} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go b/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go deleted file mode 100644 index a8392491..00000000 --- a/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build arm64 && freebsd -// +build arm64,freebsd - -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs types_freebsd.go - -package pty - -const ( - _C_SPECNAMELEN = 0xff -) - -type fiodgnameArg struct { - Len int32 - Buf *byte -} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_ppc64.go b/vendor/github.com/creack/pty/ztypes_freebsd_ppc64.go deleted file mode 100644 index 5fa102fc..00000000 --- a/vendor/github.com/creack/pty/ztypes_freebsd_ppc64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package pty - -const ( - _C_SPECNAMELEN = 0x3f -) - -type fiodgnameArg struct { - Len int32 - Pad_cgo_0 [4]byte - Buf *byte -} diff --git a/vendor/github.com/creack/pty/ztypes_loong64.go b/vendor/github.com/creack/pty/ztypes_loong64.go deleted file mode 100644 index 3beb5c17..00000000 --- a/vendor/github.com/creack/pty/ztypes_loong64.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build loong64 -// +build loong64 - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_mipsx.go b/vendor/github.com/creack/pty/ztypes_mipsx.go deleted file mode 100644 
index 28127797..00000000 --- a/vendor/github.com/creack/pty/ztypes_mipsx.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build (mips || mipsle || mips64 || mips64le) && linux -// +build mips mipsle mips64 mips64le -// +build linux - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go b/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go deleted file mode 100644 index 2ab7c455..00000000 --- a/vendor/github.com/creack/pty/ztypes_netbsd_32bit_int.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build (386 || amd64 || arm || arm64) && netbsd -// +build 386 amd64 arm arm64 -// +build netbsd - -package pty - -type ptmget struct { - Cfd int32 - Sfd int32 - Cn [1024]int8 - Sn [1024]int8 -} - -var ( - ioctl_TIOCPTSNAME = 0x48087448 - ioctl_TIOCGRANTPT = 0x20007447 -) diff --git a/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go b/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go deleted file mode 100644 index 1eb09481..00000000 --- a/vendor/github.com/creack/pty/ztypes_openbsd_32bit_int.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build (386 || amd64 || arm || arm64 || mips64) && openbsd -// +build 386 amd64 arm arm64 mips64 -// +build openbsd - -package pty - -type ptmget struct { - Cfd int32 - Sfd int32 - Cn [16]int8 - Sn [16]int8 -} - -var ioctl_PTMGET = 0x40287401 diff --git a/vendor/github.com/creack/pty/ztypes_ppc64.go b/vendor/github.com/creack/pty/ztypes_ppc64.go deleted file mode 100644 index bbb3da83..00000000 --- a/vendor/github.com/creack/pty/ztypes_ppc64.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build ppc64 -// +build ppc64 - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_ppc64le.go b/vendor/github.com/creack/pty/ztypes_ppc64le.go deleted file mode 100644 index 8a4fac3e..00000000 --- 
a/vendor/github.com/creack/pty/ztypes_ppc64le.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build ppc64le -// +build ppc64le - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_riscvx.go b/vendor/github.com/creack/pty/ztypes_riscvx.go deleted file mode 100644 index dc5da905..00000000 --- a/vendor/github.com/creack/pty/ztypes_riscvx.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build riscv || riscv64 -// +build riscv riscv64 - -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/creack/pty/ztypes_s390x.go b/vendor/github.com/creack/pty/ztypes_s390x.go deleted file mode 100644 index 3433be7c..00000000 --- a/vendor/github.com/creack/pty/ztypes_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build s390x -// +build s390x - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types.go - -package pty - -type ( - _C_int int32 - _C_uint uint32 -) diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore deleted file mode 100644 index 47bb0de4..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -*.msg -*.lok - -samples/trivial -samples/trivial2 -samples/sample -samples/reconnect -samples/ssl -samples/custom_store -samples/simple -samples/stdinpub -samples/stdoutsub -samples/routing \ No newline at end of file diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md deleted file mode 100644 
index 9791dc60..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md +++ /dev/null @@ -1,56 +0,0 @@ -Contributing to Paho -==================== - -Thanks for your interest in this project. - -Project description: --------------------- - -The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT). -Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community. - -- https://projects.eclipse.org/projects/technology.paho - -Developer resources: --------------------- - -Information regarding source code management, builds, coding standards, and more. - -- https://projects.eclipse.org/projects/technology.paho/developer - -Contributor License Agreement: ------------------------------- - -Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA). - -- http://www.eclipse.org/legal/CLA.php - -Contributing Code: ------------------- - -The Go client is developed in Github, see their documentation on the process of forking and pull requests; https://help.github.com/categories/collaborating-on-projects-using-pull-requests/ - -Git commit messages should follow the style described here; - -http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html - -Contact: --------- - -Contact the project developers via the project's "dev" list. 
- -- https://dev.eclipse.org/mailman/listinfo/paho-dev - -Search for bugs: ----------------- - -This project uses Github issues to track ongoing development and issues. - -- https://github.com/eclipse/paho.mqtt.golang/issues - -Create a new bug: ------------------ - -Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome! - -- https://github.com/eclipse/paho.mqtt.golang/issues diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE deleted file mode 100644 index f55c3953..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE +++ /dev/null @@ -1,294 +0,0 @@ -Eclipse Public License - v 2.0 (EPL-2.0) - -This program and the accompanying materials -are made available under the terms of the Eclipse Public License v2.0 -and Eclipse Distribution License v1.0 which accompany this distribution. - -The Eclipse Public License is available at - https://www.eclipse.org/legal/epl-2.0/ -and the Eclipse Distribution License is available at - http://www.eclipse.org/org/documents/edl-v10.php. - -For an explanation of what dual-licensing means to you, see: -https://www.eclipse.org/legal/eplfaq.php#DUALLIC - -**** -The epl-2.0 is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy). -**** -Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. 
A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. - Contributions do not include changes or additions to the Program that - are not Modified Works. - -"Contributor" means any person or entity that Distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which -are necessarily infringed by the use or sale of its Contribution alone -or when combined with the Program. - -"Program" means the Contributions Distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement -or any Secondary License (as applicable), including Contributors. - -"Derivative Works" shall mean any work, whether in Source Code or other -form, that is based on (or derived from) the Program and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. - -"Modified Works" shall mean any work in Source Code or other form that -results from an addition to, deletion from, or modification of the -contents of the Program, including, for purposes of clarity any new file -in Source Code form that contains any contents of the Program. Modified -Works shall not include works that contain only declarations, -interfaces, types, classes, structures, or files of the Program solely -in each case in order to link to, bind by name, or subclass the Program -or Modified Works thereof. - -"Distribute" means the acts of a) distributing or b) making available -in any manner that enables the transfer of a copy. - -"Source Code" means the form of a Program preferred for making -modifications, including but not limited to software source code, -documentation source, and configuration files. 
- -"Secondary License" means either the GNU General Public License, -Version 2.0, or any later versions of that license, including any -exceptions or additional permissions as identified by the initial -Contributor. - -2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. - - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. - Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. 
For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. - - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - -3. REQUIREMENTS - -3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of 
this section 3. - -3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - -3.3 Contributors may not remove or alter any copyright, patent, -trademark, attribution notices, disclaimers of warranty, or limitations -of liability ("notices") contained within the Program from any copy of -the Program which they Distribute, provided that Contributors may add -their own appropriate notices. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities -with respect to end users, business partners and the like. While this -license is intended to facilitate the commercial use of the Program, -the Contributor who includes the Program in a commercial product -offering should do so in a manner which does not create potential -liability for other Contributors. Therefore, if a Contributor includes -the Program in a commercial product offering, such Contributor -("Commercial Contributor") hereby agrees to defend and indemnify every -other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits -and other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such -Commercial Contributor in connection with its distribution of the Program -in a commercial product offering. The obligations in this section do not -apply to any claims or Losses relating to any actual or alleged -intellectual property infringement. 
In order to qualify, an Indemnified -Contributor must: a) promptly notify the Commercial Contributor in -writing of such claim, and b) allow the Commercial Contributor to control, -and cooperate with the Commercial Contributor in, the defense and any -related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial -product offering, Product X. That Contributor is then a Commercial -Contributor. If that Commercial Contributor then makes performance -claims, or offers warranties related to Product X, those performance -claims and warranties are such Commercial Contributor's responsibility -alone. Under this section, the Commercial Contributor would have to -defend claims against the other Contributors related to those performance -claims and warranties, and if a court requires any other Contributor to -pay any damages as a result, the Commercial Contributor must pay -those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR -PURPOSE. Each Recipient is solely responsible for determining the -appropriateness of using and distributing the Program and assumes all -risks associated with its exercise of rights under this Agreement, -including but not limited to the risks and costs of program errors, -compliance with applicable laws, damage to or loss of data, programs -or equipment, and unavailability or interruption of operations. - -6. 
DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS -SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of -the remainder of the terms of this Agreement, and without further -action by the parties hereto, such provision shall be reformed to the -minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity -(including a cross-claim or counterclaim in a lawsuit) alleging that the -Program itself (excluding combinations of the Program with other software -or hardware) infringes such Recipient's patent(s), then such Recipient's -rights granted under Section 2(b) shall terminate as of the date such -litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it -fails to comply with any of the material terms or conditions of this -Agreement and does not cure such failure in a reasonable period of -time after becoming aware of such noncompliance. If all Recipient's -rights under this Agreement terminate, Recipient agrees to cease use -and distribution of the Program as soon as reasonably practicable. -However, Recipient's obligations under this Agreement and any licenses -granted by Recipient relating to the Program shall continue and survive. 
- -Everyone is permitted to copy and distribute copies of this Agreement, -but in order to avoid inconsistency the Agreement is copyrighted and -may only be modified in the following manner. The Agreement Steward -reserves the right to publish new versions (including revisions) of -this Agreement from time to time. No one other than the Agreement -Steward has the right to modify this Agreement. The Eclipse Foundation -is the initial Agreement Steward. The Eclipse Foundation may assign the -responsibility to serve as the Agreement Steward to a suitable separate -entity. Each new version of the Agreement will be given a distinguishing -version number. The Program (including Contributions) may always be -Distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to Distribute the Program (including its -Contributions) under the new version. - -Except as expressly stated in Sections 2(a) and 2(b) above, Recipient -receives no rights or licenses to the intellectual property of any -Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. All rights in the Program not expressly granted -under this Agreement are reserved. Nothing in this Agreement is intended -to be enforceable by any entity that is not a Contributor or Recipient. -No third-party beneficiary rights are created under this Agreement. - -Exhibit A - Form of Secondary Licenses Notice - -"This Source Code may also be made available under the following -Secondary Licenses when the conditions for such availability set forth -in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), -version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. 
- - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/NOTICE.md b/vendor/github.com/eclipse/paho.mqtt.golang/NOTICE.md deleted file mode 100644 index 10c4a1cd..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/NOTICE.md +++ /dev/null @@ -1,77 +0,0 @@ -# Notices for paho.mqtt.golang - -This content is produced and maintained by the Eclipse Paho project. - - * Project home: https://www.eclipse.org/paho/ - -Note that a [separate mqtt v5 client](https://github.com/eclipse/paho.golang) also exists (this is a full rewrite -and deliberately incompatible with this library). - -## Trademarks - -Eclipse Mosquitto trademarks of the Eclipse Foundation. Eclipse, and the -Eclipse Logo are registered trademarks of the Eclipse Foundation. - -Paho is a trademark of the Eclipse Foundation. Eclipse, and the Eclipse Logo are -registered trademarks of the Eclipse Foundation. - -## Copyright - -All content is the property of the respective authors or their employers. -For more information regarding authorship of content, please consult the -listed source code repository logs. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms of the -Eclipse Public License v2.0 and Eclipse Distribution License v1.0 which accompany this -distribution. - -The Eclipse Public License is available at -https://www.eclipse.org/legal/epl-2.0/ -and the Eclipse Distribution License is available at -http://www.eclipse.org/org/documents/edl-v10.php. 
- -For an explanation of what dual-licensing means to you, see: -https://www.eclipse.org/legal/eplfaq.php#DUALLIC - -SPDX-License-Identifier: EPL-2.0 or BSD-3-Clause - -## Source Code - -The project maintains the following source code repositories: - - * https://github.com/eclipse/paho.mqtt.golang - -## Third-party Content - -This project makes use of the follow third party projects. - -Go Programming Language and Standard Library - -* License: BSD-style license (https://golang.org/LICENSE) -* Project: https://golang.org/ - -Go Networking - -* License: BSD 3-Clause style license and patent grant. -* Project: https://cs.opensource.google/go/x/net - -Go Sync - -* License: BSD 3-Clause style license and patent grant. -* Project: https://cs.opensource.google/go/x/sync/ - -Gorilla Websockets v1.4.2 - -* License: BSD 2-Clause "Simplified" License -* Project: https://github.com/gorilla/websocket - -## Cryptography - -Content may contain encryption software. The country in which you are currently -may have restrictions on the import, possession, and use, and/or re-export to -another country, of encryption software. BEFORE using any encryption software, -please check the country's laws, regulations and policies concerning the import, -possession, or use, and re-export of encryption software, to see if this is -permitted. 
\ No newline at end of file diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/README.md b/vendor/github.com/eclipse/paho.mqtt.golang/README.md deleted file mode 100644 index 21ed96f3..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/README.md +++ /dev/null @@ -1,198 +0,0 @@ - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/eclipse/paho.mqtt.golang)](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang) -[![Go Report Card](https://goreportcard.com/badge/github.com/eclipse/paho.mqtt.golang)](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang) - -Eclipse Paho MQTT Go client -=========================== - - -This repository contains the source code for the [Eclipse Paho](https://eclipse.org/paho) MQTT 3.1/3.11 Go client library. - -This code builds a library which enable applications to connect to an [MQTT](https://mqtt.org) broker to publish -messages, and to subscribe to topics and receive published messages. - -This library supports a fully asynchronous mode of operation. - -A client supporting MQTT V5 is [also available](https://github.com/eclipse/paho.golang). - -Installation and Build ----------------------- - -The process depends upon whether you are using [modules](https://golang.org/ref/mod) (recommended) or `GOPATH`. - -#### Modules - -If you are using [modules](https://blog.golang.org/using-go-modules) then `import "github.com/eclipse/paho.mqtt.golang"` -and start using it. The necessary packages will be download automatically when you run `go build`. - -Note that the latest release will be downloaded and changes may have been made since the release. If you have -encountered an issue, or wish to try the latest code for another reason, then run -`go get github.com/eclipse/paho.mqtt.golang@master` to get the latest commit. 
- -#### GOPATH - -Installation is as easy as: - -``` -go get github.com/eclipse/paho.mqtt.golang -``` - -The client depends on Google's [proxy](https://godoc.org/golang.org/x/net/proxy) package and the -[websockets](https://godoc.org/github.com/gorilla/websocket) package, also easily installed with the commands: - -``` -go get github.com/gorilla/websocket -go get golang.org/x/net/proxy -``` - - -Usage and API -------------- - -Detailed API documentation is available by using to godoc tool, or can be browsed online -using the [pkg.go.dev](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang) service. - -Samples are available in the `cmd` directory for reference. - -Note: - -The library also supports using MQTT over websockets by using the `ws://` (unsecure) or `wss://` (secure) prefix in the -URI. If the client is running behind a corporate http/https proxy then the following environment variables `HTTP_PROXY`, -`HTTPS_PROXY` and `NO_PROXY` are taken into account when establishing the connection. - -Troubleshooting ---------------- - -If you are new to MQTT and your application is not working as expected reviewing the -[MQTT specification](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html), which this library implements, -is a good first step. [MQTT.org](https://mqtt.org) has some [good resources](https://mqtt.org/getting-started/) that answer many -common questions. - -### Error Handling - -The asynchronous nature of this library makes it easy to forget to check for errors. Consider using a go routine to -log these: - -```go -t := client.Publish("topic", qos, retained, msg) -go func() { - _ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0 - if t.Error() != nil { - log.Error(t.Error()) // Use your preferred logging technique (or just fmt.Printf) - } -}() -``` - -### Logging - -If you are encountering issues then enabling logging, both within this library and on your broker, is a good way to -begin troubleshooting. 
This library can produce various levels of log by assigning the logging endpoints, ERROR, -CRITICAL, WARN and DEBUG. For example: - -```go -func main() { - mqtt.ERROR = log.New(os.Stdout, "[ERROR] ", 0) - mqtt.CRITICAL = log.New(os.Stdout, "[CRIT] ", 0) - mqtt.WARN = log.New(os.Stdout, "[WARN] ", 0) - mqtt.DEBUG = log.New(os.Stdout, "[DEBUG] ", 0) - - // Connect, Subscribe, Publish etc.. -} -``` - -### Common Problems - -* Seemingly random disconnections may be caused by another client connecting to the broker with the same client -identifier; this is as per the [spec](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc384800405). -* Unless ordered delivery of messages is essential (and you have configured your broker to support this e.g. - `max_inflight_messages=1` in mosquitto) then set `ClientOptions.SetOrderMatters(false)`. Doing so will avoid the - below issue (deadlocks due to blocking message handlers). -* A `MessageHandler` (called when a new message is received) must not block (unless - `ClientOptions.SetOrderMatters(false)` set). If you wish to perform a long-running task, or publish a message, then - please use a go routine (blocking in the handler is a common cause of unexpected `pingresp -not received, disconnecting` errors). -* When QOS1+ subscriptions have been created previously and you connect with `CleanSession` set to false it is possible -that the broker will deliver retained messages before `Subscribe` can be called. To process these messages either -configure a handler with `AddRoute` or set a `DefaultPublishHandler`. If there is no handler (or `DefaultPublishHandler`) -then inbound messages will not be acknowledged. Adding a handler (even if it's `opts.SetDefaultPublishHandler(func(mqtt.Client, mqtt.Message) {})`) -is highly recommended to avoid inadvertently hitting inflight message limits. -* Loss of network connectivity may not be detected immediately. 
If this is an issue then consider setting -`ClientOptions.KeepAlive` (sends regular messages to check the link is active). -* Reusing a `Client` is not completely safe. After calling `Disconnect` please create a new Client (`NewClient()`) rather -than attempting to reuse the existing one (note that features such as `SetAutoReconnect` mean this is rarely necessary). -* Brokers offer many configuration options; some settings may lead to unexpected results. -* Publish tokens will complete if the connection is lost and re-established using the default -options.SetAutoReconnect(true) functionality (token.Error() will return nil). Attempts will be made to re-deliver the -message but there is currently no easy way know when such messages are delivered. - -If using Mosquitto then there are a range of fairly common issues: -* `listener` - By default [Mosquitto v2+](https://mosquitto.org/documentation/migrating-to-2-0/) listens on loopback -interfaces only (meaning it will only accept connections made from the computer its running on). -* `max_inflight_messages` - Unless this is set to 1 mosquitto does not guarantee ordered delivery of messages. -* `max_queued_messages` / `max_queued_bytes` - These impose limits on the number/size of queued messages. The defaults -may lead to messages being silently dropped. -* `persistence` - Defaults to false (messages will not survive a broker restart) -* `max_keepalive` - defaults to 65535 and, from version 2.0.12, `SetKeepAlive(0)` will result in a rejected connection -by default. - -Reporting bugs --------------- - -Please report bugs by raising issues for this project in github https://github.com/eclipse/paho.mqtt.golang/issues - -A limited number of contributors monitor the issues section so if you have a general question please see the -resources in the [more information](#more-information) section for help. - -We welcome bug reports, but it is important they are actionable. 
A significant percentage of issues reported are not -resolved due to a lack of information. If we cannot replicate the problem then it is unlikely we will be able to fix it. -The information required will vary from issue to issue but almost all bug reports would be expected to include: - -* Which version of the package you are using (tag or commit - this should be in your `go.mod` file) -* A full, clear, description of the problem (detail what you are expecting vs what actually happens). -* Configuration information (code showing how you connect, please include all references to `ClientOption`) -* Broker details (name and version). - -If at all possible please also include: -* Details of your attempts to resolve the issue (what have you tried, what worked, what did not). -* A [Minimal, Reproducible Example](https://stackoverflow.com/help/minimal-reproducible-example). Providing an example -is the best way to demonstrate the issue you are facing; it is important this includes all relevant information -(including broker configuration). Docker (see `cmd/docker`) makes it relatively simple to provide a working end-to-end -example. -* Broker logs covering the period the issue occurred. -* [Application Logs](#logging) covering the period the issue occurred. Unless you have isolated the root cause of the -issue please include a link to a full log (including data from well before the problem arose). - -It is important to remember that this library does not stand alone; it communicates with a broker and any issues you are -seeing may be due to: - -* Bugs in your code. -* Bugs in this library. -* The broker configuration. -* Bugs in the broker. -* Issues with whatever you are communicating with. - -When submitting an issue, please ensure that you provide sufficient details to enable us to eliminate causes outside of -this library. 
- -Contributing ------------- - -We welcome pull requests but before your contribution can be accepted by the project, you need to create and -electronically sign the Eclipse Contributor Agreement (ECA) and sign off on the Eclipse Foundation Certificate of Origin. - -More information is available in the -[Eclipse Development Resources](http://wiki.eclipse.org/Development_Resources/Contributing_via_Git); please take special -note of the requirement that the commit record contain a "Signed-off-by" entry. - -More information ----------------- - -[Stack Overflow](https://stackoverflow.com/questions/tagged/mqtt+go) has a range questions/answers covering a range of -common issues (both relating to use of this library and MQTT in general). This is the best place to ask general questions -(including those relating to the use of this library). - -Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev). - -General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt). - -There is much more information available via the [MQTT community site](http://mqtt.org). diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/backoff.go b/vendor/github.com/eclipse/paho.mqtt.golang/backoff.go deleted file mode 100644 index 8ee06f69..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/backoff.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Matt Brittan - * Daichi Tomaru - */ - -package mqtt - -import ( - "sync" - "time" -) - -// Controller for sleep with backoff when the client attempts reconnection -// It has statuses for each situations cause reconnection. -type backoffController struct { - sync.RWMutex - statusMap map[string]*backoffStatus -} - -type backoffStatus struct { - lastSleepPeriod time.Duration - lastErrorTime time.Time -} - -func newBackoffController() *backoffController { - return &backoffController{ - statusMap: map[string]*backoffStatus{}, - } -} - -// Calculate next sleep period from the specified parameters. -// Returned values are next sleep period and whether the error situation is continual. -// If connection errors continuouslly occurs, its sleep period is exponentially increased. -// Also if there is a lot of time between last and this error, sleep period is initialized. -func (b *backoffController) getBackoffSleepTime( - situation string, initSleepPeriod time.Duration, maxSleepPeriod time.Duration, processTime time.Duration, skipFirst bool, -) (time.Duration, bool) { - // Decide first sleep time if the situation is not continual. - var firstProcess = func(status *backoffStatus, init time.Duration, skip bool) (time.Duration, bool) { - if skip { - status.lastSleepPeriod = 0 - return 0, false - } - status.lastSleepPeriod = init - return init, false - } - - // Prioritize maxSleep. - if initSleepPeriod > maxSleepPeriod { - initSleepPeriod = maxSleepPeriod - } - b.Lock() - defer b.Unlock() - - status, exist := b.statusMap[situation] - if !exist { - b.statusMap[situation] = &backoffStatus{initSleepPeriod, time.Now()} - return firstProcess(b.statusMap[situation], initSleepPeriod, skipFirst) - } - - oldTime := status.lastErrorTime - status.lastErrorTime = time.Now() - - // When there is a lot of time between last and this error, sleep period is initialized. 
- if status.lastErrorTime.Sub(oldTime) > (processTime * 2 + status.lastSleepPeriod) { - return firstProcess(status, initSleepPeriod, skipFirst) - } - - if status.lastSleepPeriod == 0 { - status.lastSleepPeriod = initSleepPeriod - return initSleepPeriod, true - } - - if nextSleepPeriod := status.lastSleepPeriod * 2; nextSleepPeriod <= maxSleepPeriod { - status.lastSleepPeriod = nextSleepPeriod - } else { - status.lastSleepPeriod = maxSleepPeriod - } - - return status.lastSleepPeriod, true -} - -// Execute sleep the time returned from getBackoffSleepTime. -func (b *backoffController) sleepWithBackoff( - situation string, initSleepPeriod time.Duration, maxSleepPeriod time.Duration, processTime time.Duration, skipFirst bool, -) (time.Duration, bool) { - sleep, isFirst := b.getBackoffSleepTime(situation, initSleepPeriod, maxSleepPeriod, processTime, skipFirst) - if sleep != 0 { - time.Sleep(sleep) - } - return sleep, isFirst -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/client.go b/vendor/github.com/eclipse/paho.mqtt.golang/client.go deleted file mode 100644 index 9fe349e6..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/client.go +++ /dev/null @@ -1,1240 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * Matt Brittan - */ - -// Portions copyright © 2018 TIBCO Software Inc. - -// Package mqtt provides an MQTT v3.1.1 client library. 
-package mqtt - -import ( - "bytes" - "context" - "errors" - "fmt" - "net" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/sync/semaphore" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// Client is the interface definition for a Client as used by this -// library, the interface is primarily to allow mocking tests. -// -// It is an MQTT v3.1.1 client for communicating -// with an MQTT server using non-blocking methods that allow work -// to be done in the background. -// An application may connect to an MQTT server using: -// -// A plain TCP socket (e.g. mqtt://test.mosquitto.org:1833) -// A secure SSL/TLS socket (e.g. tls://test.mosquitto.org:8883) -// A websocket (e.g ws://test.mosquitto.org:8080 or wss://test.mosquitto.org:8081) -// Something else (using `options.CustomOpenConnectionFn`) -// -// To enable ensured message delivery at Quality of Service (QoS) levels -// described in the MQTT spec, a message persistence mechanism must be -// used. This is done by providing a type which implements the Store -// interface. For convenience, FileStore and MemoryStore are provided -// implementations that should be sufficient for most use cases. More -// information can be found in their respective documentation. -// Numerous connection options may be specified by configuring a -// and then supplying a ClientOptions type. -// Implementations of Client must be safe for concurrent use by multiple -// goroutines -type Client interface { - // IsConnected returns a bool signifying whether - // the client is connected or not. 
- IsConnected() bool - // IsConnectionOpen return a bool signifying whether the client has an active - // connection to mqtt broker, i.e not in disconnected or reconnect mode - IsConnectionOpen() bool - // Connect will create a connection to the message broker, by default - // it will attempt to connect at v3.1.1 and auto retry at v3.1 if that - // fails - Connect() Token - // Disconnect will end the connection with the server, but not before waiting - // the specified number of milliseconds to wait for existing work to be - // completed. - Disconnect(quiesce uint) - // Publish will publish a message with the specified QoS and content - // to the specified topic. - // Returns a token to track delivery of the message to the broker - Publish(topic string, qos byte, retained bool, payload interface{}) Token - // Subscribe starts a new subscription. Provide a MessageHandler to be executed when - // a message is published on the topic provided, or nil for the default handler. - // - // If options.OrderMatters is true (the default) then callback must not block or - // call functions within this package that may block (e.g. Publish) other than in - // a new go routine. - // callback must be safe for concurrent use by multiple goroutines. - Subscribe(topic string, qos byte, callback MessageHandler) Token - // SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to - // be executed when a message is published on one of the topics provided, or nil for the - // default handler. - // - // If options.OrderMatters is true (the default) then callback must not block or - // call functions within this package that may block (e.g. Publish) other than in - // a new go routine. - // callback must be safe for concurrent use by multiple goroutines. - SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token - // Unsubscribe will end the subscription from each of the topics provided. 
- // Messages published to those topics from other clients will no longer be - // received. - Unsubscribe(topics ...string) Token - // AddRoute allows you to add a handler for messages on a specific topic - // without making a subscription. For example having a different handler - // for parts of a wildcard subscription or for receiving retained messages - // upon connection (before Sub scribe can be processed). - // - // If options.OrderMatters is true (the default) then callback must not block or - // call functions within this package that may block (e.g. Publish) other than in - // a new go routine. - // callback must be safe for concurrent use by multiple goroutines. - AddRoute(topic string, callback MessageHandler) - // OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions - // in use by the client. - OptionsReader() ClientOptionsReader -} - -// client implements the Client interface -// clients are safe for concurrent use by multiple -// goroutines -type client struct { - lastSent atomic.Value // time.Time - the last time a packet was successfully sent to network - lastReceived atomic.Value // time.Time - the last time a packet was successfully received from network - pingOutstanding int32 // set to 1 if a ping has been sent but response not ret received - - status connectionStatus // see constants in status.go for values - - messageIds // effectively a map from message id to token completor - - obound chan *PacketAndToken // outgoing publish packet - oboundP chan *PacketAndToken // outgoing 'priority' packet (anything other than publish) - msgRouter *router // routes topics to handlers - persist Store - options ClientOptions - optionsMu sync.Mutex // Protects the options in a few limited cases where needed for testing - - conn net.Conn // the network connection, must only be set with connMu locked (only used when starting/stopping workers) - connMu sync.Mutex // mutex for the connection (again only used in two functions) - - stop 
chan struct{} // Closed to request that workers stop - workers sync.WaitGroup // used to wait for workers to complete (ping, keepalive, errwatch, resume) - commsStopped chan struct{} // closed when the comms routines have stopped (kept running until after workers have closed to avoid deadlocks) - - backoff *backoffController -} - -// NewClient will create an MQTT v3.1.1 client with all of the options specified -// in the provided ClientOptions. The client must have the Connect method called -// on it before it may be used. This is to make sure resources (such as a net -// connection) are created before the application is actually ready. -func NewClient(o *ClientOptions) Client { - c := &client{} - c.options = *o - - if c.options.Store == nil { - c.options.Store = NewMemoryStore() - } - switch c.options.ProtocolVersion { - case 3, 4: - c.options.protocolVersionExplicit = true - case 0x83, 0x84: - c.options.protocolVersionExplicit = true - default: - c.options.ProtocolVersion = 4 - c.options.protocolVersionExplicit = false - } - c.persist = c.options.Store - c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)} - c.msgRouter = newRouter() - c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler) - c.obound = make(chan *PacketAndToken) - c.oboundP = make(chan *PacketAndToken) - c.backoff = newBackoffController() - return c -} - -// AddRoute allows you to add a handler for messages on a specific topic -// without making a subscription. For example having a different handler -// for parts of a wildcard subscription -// -// If options.OrderMatters is true (the default) then callback must not block or -// call functions within this package that may block (e.g. Publish) other than in -// a new go routine. -// callback must be safe for concurrent use by multiple goroutines. 
-func (c *client) AddRoute(topic string, callback MessageHandler) { - if callback != nil { - c.msgRouter.addRoute(topic, callback) - } -} - -// IsConnected returns a bool signifying whether -// the client is connected or not. -// connected means that the connection is up now OR it will -// be established/reestablished automatically when possible -// Warning: The connection status may change at any time so use this with care! -func (c *client) IsConnected() bool { - // This will need to change if additional statuses are added - s, r := c.status.ConnectionStatusRetry() - switch { - case s == connected: - return true - case c.options.ConnectRetry && s == connecting: - return true - case c.options.AutoReconnect: - return s == reconnecting || (s == disconnecting && r) // r indicates we will reconnect - default: - return false - } -} - -// IsConnectionOpen return a bool signifying whether the client has an active -// connection to mqtt broker, i.e. not in disconnected or reconnect mode -// Warning: The connection status may change at any time so use this with care! 
-func (c *client) IsConnectionOpen() bool { - return c.status.ConnectionStatus() == connected -} - -// ErrNotConnected is the error returned from function calls that are -// made when the client is not connected to a broker -var ErrNotConnected = errors.New("not Connected") - -// Connect will create a connection to the message broker, by default -// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that -// fails -// Note: If using QOS1+ and CleanSession=false it is advisable to add -// routes (or a DefaultPublishHandler) prior to calling Connect() -// because queued messages may be delivered immediately post connection -func (c *client) Connect() Token { - t := newToken(packets.Connect).(*ConnectToken) - DEBUG.Println(CLI, "Connect()") - - connectionUp, err := c.status.Connecting() - if err != nil { - if err == errAlreadyConnectedOrReconnecting && c.options.AutoReconnect { - // When reconnection is active we don't consider calls tro Connect to ba an error (mainly for compatability) - WARN.Println(CLI, "Connect() called but not disconnected") - t.returnCode = packets.Accepted - t.flowComplete() - return t - } - ERROR.Println(CLI, err) // CONNECT should never be called unless we are disconnected - t.setError(err) - return t - } - - c.persist.Open() - if c.options.ConnectRetry { - c.reserveStoredPublishIDs() // Reserve IDs to allow publishing before connect complete - } - - go func() { - if len(c.options.Servers) == 0 { - t.setError(fmt.Errorf("no servers defined to connect to")) - if err := connectionUp(false); err != nil { - ERROR.Println(CLI, err.Error()) - } - return - } - - RETRYCONN: - var conn net.Conn - var rc byte - var err error - conn, rc, t.sessionPresent, err = c.attemptConnection() - if err != nil { - if c.options.ConnectRetry { - DEBUG.Println(CLI, "Connect failed, sleeping for", int(c.options.ConnectRetryInterval.Seconds()), "seconds and will then retry, error:", err.Error()) - time.Sleep(c.options.ConnectRetryInterval) - - if 
c.status.ConnectionStatus() == connecting { // Possible connection aborted elsewhere - goto RETRYCONN - } - } - ERROR.Println(CLI, "Failed to connect to a broker") - c.persist.Close() - t.returnCode = rc - t.setError(err) - if err := connectionUp(false); err != nil { - ERROR.Println(CLI, err.Error()) - } - return - } - inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing - if c.startCommsWorkers(conn, connectionUp, inboundFromStore) { // note that this takes care of updating the status (to connected or disconnected) - // Take care of any messages in the store - if !c.options.CleanSession { - c.resume(c.options.ResumeSubs, inboundFromStore) - } else { - c.persist.Reset() - } - } else { // Note: With the new status subsystem this should only happen if Disconnect called simultaneously with the above - WARN.Println(CLI, "Connect() called but connection established in another goroutine") - } - - close(inboundFromStore) - t.flowComplete() - DEBUG.Println(CLI, "exit startClient") - }() - return t -} - -// internal function used to reconnect the client when it loses its connection -// The connection status MUST be reconnecting prior to calling this function (via call to status.connectionLost) -func (c *client) reconnect(connectionUp connCompletedFn) { - DEBUG.Println(CLI, "enter reconnect") - var ( - initSleep = 1 * time.Second - conn net.Conn - ) - - // If the reason of connection lost is same as the before one, sleep timer is set before attempting connection is started. 
- // Sleep time is exponentially increased as the same situation continues - if slp, isContinual := c.backoff.sleepWithBackoff("connectionLost", initSleep, c.options.MaxReconnectInterval, 3 * time.Second, true); isContinual { - DEBUG.Println(CLI, "Detect continual connection lost after reconnect, slept for", int(slp.Seconds()), "seconds") - } - - for { - if nil != c.options.OnReconnecting { - c.options.OnReconnecting(c, &c.options) - } - var err error - conn, _, _, err = c.attemptConnection() - if err == nil { - break - } - sleep, _ := c.backoff.sleepWithBackoff("attemptReconnection", initSleep, c.options.MaxReconnectInterval, c.options.ConnectTimeout, false) - DEBUG.Println(CLI, "Reconnect failed, slept for", int(sleep.Seconds()), "seconds:", err) - - if c.status.ConnectionStatus() != reconnecting { // Disconnect may have been called - if err := connectionUp(false); err != nil { // Should always return an error - ERROR.Println(CLI, err.Error()) - } - DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect") - return - } - } - - inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing - if c.startCommsWorkers(conn, connectionUp, inboundFromStore) { // note that this takes care of updating the status (to connected or disconnected) - c.resume(c.options.ResumeSubs, inboundFromStore) - } - close(inboundFromStore) -} - -// attemptConnection makes a single attempt to connect to each of the brokers -// the protocol version to use is passed in (as c.options.ProtocolVersion) -// Note: Does not set c.conn in order to minimise race conditions -// Returns: -// net.Conn - Connected network connection -// byte - Return code (packets.Accepted indicates a successful connection). -// bool - SessionPresent flag from the connect ack (only valid if packets.Accepted) -// err - Error (err != nil guarantees that conn has been set to active connection). 
-func (c *client) attemptConnection() (net.Conn, byte, bool, error) { - protocolVersion := c.options.ProtocolVersion - var ( - sessionPresent bool - conn net.Conn - err error - rc byte - ) - - c.optionsMu.Lock() // Protect c.options.Servers so that servers can be added in test cases - brokers := c.options.Servers - c.optionsMu.Unlock() - for _, broker := range brokers { - cm := newConnectMsgFromOptions(&c.options, broker) - DEBUG.Println(CLI, "about to write new connect msg") - CONN: - tlsCfg := c.options.TLSConfig - if c.options.OnConnectAttempt != nil { - DEBUG.Println(CLI, "using custom onConnectAttempt handler...") - tlsCfg = c.options.OnConnectAttempt(broker, c.options.TLSConfig) - } - connDeadline := time.Now().Add(c.options.ConnectTimeout) // Time by which connection must be established - dialer := c.options.Dialer - if dialer == nil { // - WARN.Println(CLI, "dialer was nil, using default") - dialer = &net.Dialer{Timeout: 30 * time.Second} - } - // Start by opening the network connection (tcp, tls, ws) etc - if c.options.CustomOpenConnectionFn != nil { - conn, err = c.options.CustomOpenConnectionFn(broker, c.options) - } else { - conn, err = openConnection(broker, tlsCfg, c.options.ConnectTimeout, c.options.HTTPHeaders, c.options.WebsocketOptions, dialer) - } - if err != nil { - ERROR.Println(CLI, err.Error()) - WARN.Println(CLI, "failed to connect to broker, trying next") - rc = packets.ErrNetworkError - continue - } - DEBUG.Println(CLI, "socket connected to broker") - - // Now we perform the MQTT connection handshake ensuring that it does not exceed the timeout - if err := conn.SetDeadline(connDeadline); err != nil { - ERROR.Println(CLI, "set deadline for handshake ", err) - } - - // Now we perform the MQTT connection handshake - rc, sessionPresent, err = connectMQTT(conn, cm, protocolVersion) - if rc == packets.Accepted { - if err := conn.SetDeadline(time.Time{}); err != nil { - ERROR.Println(CLI, "reset deadline following handshake ", err) - } - break // 
successfully connected - } - - // We may have to attempt the connection with MQTT 3.1 - _ = conn.Close() - - if !c.options.protocolVersionExplicit && protocolVersion == 4 { // try falling back to 3.1? - DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") - protocolVersion = 3 - goto CONN - } - if c.options.protocolVersionExplicit { // to maintain logging from previous version - ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc]) - } - } - // If the connection was successful we set member variable and lock in the protocol version for future connection attempts (and users) - if rc == packets.Accepted { - c.options.ProtocolVersion = protocolVersion - c.options.protocolVersionExplicit = true - } else { - // Maintain same error format as used previously - if rc != packets.ErrNetworkError { // mqtt error - err = packets.ConnErrors[rc] - } else { // network error (if this occurred in ConnectMQTT then err will be nil) - err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err) - } - } - return conn, rc, sessionPresent, err -} - -// Disconnect will end the connection with the server, but not before waiting -// the specified number of milliseconds to wait for existing work to be -// completed. -// WARNING: `Disconnect` may return before all activities (goroutines) have completed. This means that -// reusing the `client` may lead to panics. If you want to reconnect when the connection drops then use -// `SetAutoReconnect` and/or `SetConnectRetry`options instead of implementing this yourself. 
-func (c *client) Disconnect(quiesce uint) { - done := make(chan struct{}) // Simplest way to ensure quiesce is always honoured - go func() { - defer close(done) - disDone, err := c.status.Disconnecting() - if err != nil { - // Status has been set to disconnecting, but we had to wait for something else to complete - WARN.Println(CLI, err.Error()) - return - } - defer func() { - c.disconnect() // Force disconnection - disDone() // Update status - }() - DEBUG.Println(CLI, "disconnecting") - dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) - dt := newToken(packets.Disconnect) - select { - case c.oboundP <- &PacketAndToken{p: dm, t: dt}: - // wait for work to finish, or quiesce time consumed - DEBUG.Println(CLI, "calling WaitTimeout") - dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) - DEBUG.Println(CLI, "WaitTimeout done") - // Below code causes a potential data race. Following status refactor it should no longer be required - // but leaving in as need to check code further. 
- // case <-c.commsStopped: - // WARN.Println("Disconnect packet could not be sent because comms stopped") - case <-time.After(time.Duration(quiesce) * time.Millisecond): - WARN.Println("Disconnect packet not sent due to timeout") - } - }() - - // Return when done or after timeout expires (would like to change but this maintains compatibility) - delay := time.NewTimer(time.Duration(quiesce) * time.Millisecond) - select { - case <-done: - if !delay.Stop() { - <-delay.C - } - case <-delay.C: - } -} - -// forceDisconnect will end the connection with the mqtt broker immediately (used for tests only) -func (c *client) forceDisconnect() { - disDone, err := c.status.Disconnecting() - if err != nil { - // Possible that we are not actually connected - WARN.Println(CLI, err.Error()) - return - } - DEBUG.Println(CLI, "forcefully disconnecting") - c.disconnect() - disDone() -} - -// disconnect cleans up after a final disconnection (user requested so no auto reconnection) -func (c *client) disconnect() { - done := c.stopCommsWorkers() - if done != nil { - <-done // Wait until the disconnect is complete (to limit chance that another connection will be started) - DEBUG.Println(CLI, "forcefully disconnecting") - c.messageIds.cleanUp() - DEBUG.Println(CLI, "disconnected") - c.persist.Close() - } -} - -// internalConnLost cleanup when connection is lost or an error occurs -// Note: This function will not block -func (c *client) internalConnLost(whyConnLost error) { - // It is possible that internalConnLost will be called multiple times simultaneously - // (including after sending a DisconnectPacket) as such we only do cleanup etc if the - // routines were actually running and are not being disconnected at users request - DEBUG.Println(CLI, "internalConnLost called") - disDone, err := c.status.ConnectionLost(c.options.AutoReconnect && c.status.ConnectionStatus() > connecting) - if err != nil { - if err == errConnLossWhileDisconnecting || err == errAlreadyHandlingConnectionLoss { - 
return // Loss of connection is expected or already being handled - } - ERROR.Println(CLI, fmt.Sprintf("internalConnLost unexpected status: %s", err.Error())) - return - } - - // c.stopCommsWorker returns a channel that is closed when the operation completes. This was required prior - // to the implementation of proper status management but has been left in place, for now, to minimise change - stopDone := c.stopCommsWorkers() - // stopDone was required in previous versions because there was no connectionLost status (and there were - // issues with status handling). This code has been left in place for the time being just in case the new - // status handling contains bugs (refactoring required at some point). - if stopDone == nil { // stopDone will be nil if workers already in the process of stopping or stopped - ERROR.Println(CLI, "internalConnLost stopDone unexpectedly nil - BUG BUG") - // Cannot really do anything other than leave things disconnected - if _, err = disDone(false); err != nil { // Safest option - cannot leave status as connectionLost - ERROR.Println(CLI, fmt.Sprintf("internalConnLost failed to set status to disconnected (stopDone): %s", err.Error())) - } - return - } - - // It may take a while for the disconnection to complete whatever called us needs to exit cleanly so finnish in goRoutine - go func() { - DEBUG.Println(CLI, "internalConnLost waiting on workers") - <-stopDone - DEBUG.Println(CLI, "internalConnLost workers stopped") - - reConnDone, err := disDone(true) - if err != nil { - ERROR.Println(CLI, "failure whilst reporting completion of disconnect", err) - } else if reConnDone == nil { // Should never happen - ERROR.Println(CLI, "BUG BUG BUG reconnection function is nil", err) - } - - reconnect := err == nil && reConnDone != nil - - if c.options.CleanSession && !reconnect { - c.messageIds.cleanUp() // completes PUB/SUB/UNSUB tokens - } else if !c.options.ResumeSubs { - c.messageIds.cleanUpSubscribe() // completes SUB/UNSUB tokens - } - if 
reconnect { - go c.reconnect(reConnDone) // Will set connection status to reconnecting - } - if c.options.OnConnectionLost != nil { - go c.options.OnConnectionLost(c, whyConnLost) - } - DEBUG.Println(CLI, "internalConnLost complete") - }() -} - -// startCommsWorkers is called when the connection is up. -// It starts off the routines needed to process incoming and outgoing messages. -// Returns true if the comms workers were started (i.e. successful connection) -// connectionUp(true) will be called once everything is up; connectionUp(false) will be called on failure -func (c *client) startCommsWorkers(conn net.Conn, connectionUp connCompletedFn, inboundFromStore <-chan packets.ControlPacket) bool { - DEBUG.Println(CLI, "startCommsWorkers called") - c.connMu.Lock() - defer c.connMu.Unlock() - if c.conn != nil { // Should never happen due to new status handling; leaving in for safety for the time being - WARN.Println(CLI, "startCommsWorkers called when commsworkers already running BUG BUG") - _ = conn.Close() // No use for the new network connection - if err := connectionUp(false); err != nil { - ERROR.Println(CLI, err.Error()) - } - return false - } - c.conn = conn // Store the connection - - c.stop = make(chan struct{}) - if c.options.KeepAlive != 0 { - atomic.StoreInt32(&c.pingOutstanding, 0) - c.lastReceived.Store(time.Now()) - c.lastSent.Store(time.Now()) - c.workers.Add(1) - go keepalive(c, conn) - } - - // matchAndDispatch will process messages received from the network. It may generate acknowledgements - // It will complete when incomingPubChan is closed and will close ackOut prior to exiting - incomingPubChan := make(chan *packets.PublishPacket) - c.workers.Add(1) // Done will be called when ackOut is closed - ackOut := c.msgRouter.matchAndDispatch(incomingPubChan, c.options.Order, c) - - // The connection is now ready for use (we spin up a few go routines below). It is possible that - // Disconnect has been called in the interim... 
- if err := connectionUp(true); err != nil { - DEBUG.Println(CLI, err) - close(c.stop) // Tidy up anything we have already started - close(incomingPubChan) - c.workers.Wait() - c.conn.Close() - c.conn = nil - return false - } - DEBUG.Println(CLI, "client is connected/reconnected") - if c.options.OnConnect != nil { - go c.options.OnConnect(c) - } - - // c.oboundP and c.obound need to stay active for the life of the client because, depending upon the options, - // messages may be published while the client is disconnected (they will block unless in a goroutine). However - // to keep the comms routines clean we want to shutdown the input messages it uses so create out own channels - // and copy data across. - commsobound := make(chan *PacketAndToken) // outgoing publish packets - commsoboundP := make(chan *PacketAndToken) // outgoing 'priority' packet - c.workers.Add(1) - go func() { - defer c.workers.Done() - for { - select { - case msg := <-c.oboundP: - commsoboundP <- msg - case msg := <-c.obound: - commsobound <- msg - case msg, ok := <-ackOut: - if !ok { - ackOut = nil // ignore channel going forward - c.workers.Done() // matchAndDispatch has completed - continue // await next message - } - commsoboundP <- msg - case <-c.stop: - // Attempt to transmit any outstanding acknowledgements (this may well fail but should work if this is a clean disconnect) - if ackOut != nil { - for msg := range ackOut { - commsoboundP <- msg - } - c.workers.Done() // matchAndDispatch has completed - } - close(commsoboundP) // Nothing sending to these channels anymore so close them and allow comms routines to exit - close(commsobound) - DEBUG.Println(CLI, "startCommsWorkers output redirector finished") - return - } - } - }() - - commsIncomingPub, commsErrors := startComms(c.conn, c, inboundFromStore, commsoboundP, commsobound) - c.commsStopped = make(chan struct{}) - go func() { - for { - if commsIncomingPub == nil && commsErrors == nil { - break - } - select { - case pub, ok := 
<-commsIncomingPub: - if !ok { - // Incoming comms has shutdown - close(incomingPubChan) // stop the router - commsIncomingPub = nil - continue - } - // Care is needed here because an error elsewhere could trigger a deadlock - sendPubLoop: - for { - select { - case incomingPubChan <- pub: - break sendPubLoop - case err, ok := <-commsErrors: - if !ok { // commsErrors has been closed so we can ignore it - commsErrors = nil - continue - } - ERROR.Println(CLI, "Connect comms goroutine - error triggered during send Pub", err) - c.internalConnLost(err) // no harm in calling this if the connection is already down (or shutdown is in progress) - continue - } - } - case err, ok := <-commsErrors: - if !ok { - commsErrors = nil - continue - } - ERROR.Println(CLI, "Connect comms goroutine - error triggered", err) - c.internalConnLost(err) // no harm in calling this if the connection is already down (or shutdown is in progress) - continue - } - } - DEBUG.Println(CLI, "incoming comms goroutine done") - close(c.commsStopped) - }() - DEBUG.Println(CLI, "startCommsWorkers done") - return true -} - -// stopWorkersAndComms - Cleanly shuts down worker go routines (including the comms routines) and waits until everything has stopped -// Returns nil if workers did not need to be stopped; otherwise returns a channel which will be closed when the stop is complete -// Note: This may block so run as a go routine if calling from any of the comms routines -// Note2: It should be possible to simplify this now that the new status management code is in place. 
-func (c *client) stopCommsWorkers() chan struct{} { - DEBUG.Println(CLI, "stopCommsWorkers called") - // It is possible that this function will be called multiple times simultaneously due to the way things get shutdown - c.connMu.Lock() - if c.conn == nil { - DEBUG.Println(CLI, "stopCommsWorkers done (not running)") - c.connMu.Unlock() - return nil - } - - // It is important that everything is stopped in the correct order to avoid deadlocks. The main issue here is - // the router because it both receives incoming publish messages and also sends outgoing acknowledgements. To - // avoid issues we signal the workers to stop and close the connection (it is probably already closed but - // there is no harm in being sure). We can then wait for the workers to finnish before closing outbound comms - // channels which will allow the comms routines to exit. - - // We stop all non-comms related workers first (ping, keepalive, errwatch, resume etc) so they don't get blocked waiting on comms - close(c.stop) // Signal for workers to stop - c.conn.Close() // Possible that this is already closed but no harm in closing again - c.conn = nil // Important that this is the only place that this is set to nil - c.connMu.Unlock() // As the connection is now nil we can unlock the mu (allowing subsequent calls to exit immediately) - - doneChan := make(chan struct{}) - - go func() { - DEBUG.Println(CLI, "stopCommsWorkers waiting for workers") - c.workers.Wait() - - // Stopping the workers will allow the comms routines to exit; we wait for these to complete - DEBUG.Println(CLI, "stopCommsWorkers waiting for comms") - <-c.commsStopped // wait for comms routine to stop - - DEBUG.Println(CLI, "stopCommsWorkers done") - close(doneChan) - }() - return doneChan -} - -// Publish will publish a message with the specified QoS and content -// to the specified topic. 
-// Returns a token to track delivery of the message to the broker -func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token { - token := newToken(packets.Publish).(*PublishToken) - DEBUG.Println(CLI, "enter Publish") - switch { - case !c.IsConnected(): - token.setError(ErrNotConnected) - return token - case c.status.ConnectionStatus() == reconnecting && qos == 0: - // message written to store and will be sent when connection comes up - token.flowComplete() - return token - } - pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pub.Qos = qos - pub.TopicName = topic - pub.Retain = retained - switch p := payload.(type) { - case string: - pub.Payload = []byte(p) - case []byte: - pub.Payload = p - case bytes.Buffer: - pub.Payload = p.Bytes() - default: - token.setError(fmt.Errorf("unknown payload type")) - return token - } - - if pub.Qos != 0 && pub.MessageID == 0 { - mID := c.getID(token) - if mID == 0 { - token.setError(fmt.Errorf("no message IDs available")) - return token - } - pub.MessageID = mID - token.messageID = mID - } - persistOutbound(c.persist, pub) - switch c.status.ConnectionStatus() { - case connecting: - DEBUG.Println(CLI, "storing publish message (connecting), topic:", topic) - case reconnecting: - DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic) - case disconnecting: - DEBUG.Println(CLI, "storing publish message (disconnecting), topic:", topic) - default: - DEBUG.Println(CLI, "sending publish message, topic:", topic) - publishWaitTimeout := c.options.WriteTimeout - if publishWaitTimeout == 0 { - publishWaitTimeout = time.Second * 30 - } - select { - case c.obound <- &PacketAndToken{p: pub, t: token}: - case <-time.After(publishWaitTimeout): - token.setError(errors.New("publish was broken by timeout")) - } - } - return token -} - -// Subscribe starts a new subscription. Provide a MessageHandler to be executed when -// a message is published on the topic provided. 
-// -// If options.OrderMatters is true (the default) then callback must not block or -// call functions within this package that may block (e.g. Publish) other than in -// a new go routine. -// callback must be safe for concurrent use by multiple goroutines. -func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token { - token := newToken(packets.Subscribe).(*SubscribeToken) - DEBUG.Println(CLI, "enter Subscribe") - if !c.IsConnected() { - token.setError(ErrNotConnected) - return token - } - if !c.IsConnectionOpen() { - switch { - case !c.options.ResumeSubs: - // if not connected and resumeSubs not set this sub will be thrown away - token.setError(fmt.Errorf("not currently connected and ResumeSubs not set")) - return token - case c.options.CleanSession && c.status.ConnectionStatus() == reconnecting: - // if reconnecting and cleanSession is true this sub will be thrown away - token.setError(fmt.Errorf("reconnecting state and cleansession is true")) - return token - } - } - sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) - if err := validateTopicAndQos(topic, qos); err != nil { - token.setError(err) - return token - } - sub.Topics = append(sub.Topics, topic) - sub.Qoss = append(sub.Qoss, qos) - - if strings.HasPrefix(topic, "$share/") { - topic = strings.Join(strings.Split(topic, "/")[2:], "/") - } - - if strings.HasPrefix(topic, "$queue/") { - topic = strings.TrimPrefix(topic, "$queue/") - } - - if callback != nil { - c.msgRouter.addRoute(topic, callback) - } - - token.subs = append(token.subs, topic) - - if sub.MessageID == 0 { - mID := c.getID(token) - if mID == 0 { - token.setError(fmt.Errorf("no message IDs available")) - return token - } - sub.MessageID = mID - token.messageID = mID - } - DEBUG.Println(CLI, sub.String()) - - if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection - persistOutbound(c.persist, sub) - } - switch c.status.ConnectionStatus() { - case 
connecting: - DEBUG.Println(CLI, "storing subscribe message (connecting), topic:", topic) - case reconnecting: - DEBUG.Println(CLI, "storing subscribe message (reconnecting), topic:", topic) - case disconnecting: - DEBUG.Println(CLI, "storing subscribe message (disconnecting), topic:", topic) - default: - DEBUG.Println(CLI, "sending subscribe message, topic:", topic) - subscribeWaitTimeout := c.options.WriteTimeout - if subscribeWaitTimeout == 0 { - subscribeWaitTimeout = time.Second * 30 - } - select { - case c.oboundP <- &PacketAndToken{p: sub, t: token}: - case <-time.After(subscribeWaitTimeout): - token.setError(errors.New("subscribe was broken by timeout")) - } - } - DEBUG.Println(CLI, "exit Subscribe") - return token -} - -// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to -// be executed when a message is published on one of the topics provided. -// -// If options.OrderMatters is true (the default) then callback must not block or -// call functions within this package that may block (e.g. Publish) other than in -// a new go routine. -// callback must be safe for concurrent use by multiple goroutines. 
-func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token { - var err error - token := newToken(packets.Subscribe).(*SubscribeToken) - DEBUG.Println(CLI, "enter SubscribeMultiple") - if !c.IsConnected() { - token.setError(ErrNotConnected) - return token - } - if !c.IsConnectionOpen() { - switch { - case !c.options.ResumeSubs: - // if not connected and resumesubs not set this sub will be thrown away - token.setError(fmt.Errorf("not currently connected and ResumeSubs not set")) - return token - case c.options.CleanSession && c.status.ConnectionStatus() == reconnecting: - // if reconnecting and cleanSession is true this sub will be thrown away - token.setError(fmt.Errorf("reconnecting state and cleansession is true")) - return token - } - } - sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) - if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil { - token.setError(err) - return token - } - - if callback != nil { - for topic := range filters { - c.msgRouter.addRoute(topic, callback) - } - } - token.subs = make([]string, len(sub.Topics)) - copy(token.subs, sub.Topics) - - if sub.MessageID == 0 { - mID := c.getID(token) - if mID == 0 { - token.setError(fmt.Errorf("no message IDs available")) - return token - } - sub.MessageID = mID - token.messageID = mID - } - if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection - persistOutbound(c.persist, sub) - } - switch c.status.ConnectionStatus() { - case connecting: - DEBUG.Println(CLI, "storing subscribe message (connecting), topics:", sub.Topics) - case reconnecting: - DEBUG.Println(CLI, "storing subscribe message (reconnecting), topics:", sub.Topics) - case disconnecting: - DEBUG.Println(CLI, "storing subscribe message (disconnecting), topics:", sub.Topics) - default: - DEBUG.Println(CLI, "sending subscribe message, topics:", sub.Topics) - subscribeWaitTimeout := c.options.WriteTimeout - if 
subscribeWaitTimeout == 0 { - subscribeWaitTimeout = time.Second * 30 - } - select { - case c.oboundP <- &PacketAndToken{p: sub, t: token}: - case <-time.After(subscribeWaitTimeout): - token.setError(errors.New("subscribe was broken by timeout")) - } - } - DEBUG.Println(CLI, "exit SubscribeMultiple") - return token -} - -// reserveStoredPublishIDs reserves the ids for publish packets in the persistent store to ensure these are not duplicated -func (c *client) reserveStoredPublishIDs() { - // The resume function sets the stored id for publish packets only (some other packets - // will get new ids in net code). This means that the only keys we need to ensure are - // unique are the publish ones (and these will completed/replaced in resume() ) - if !c.options.CleanSession { - storedKeys := c.persist.All() - for _, key := range storedKeys { - packet := c.persist.Get(key) - if packet == nil { - continue - } - switch packet.(type) { - case *packets.PublishPacket: - details := packet.Details() - token := &PlaceHolderToken{id: details.MessageID} - c.claimID(token, details.MessageID) - } - } - } -} - -// Load all stored messages and resend them -// Call this to ensure QOS > 1,2 even after an application crash -// Note: This function will exit if c.stop is closed (this allows the shutdown to proceed avoiding a potential deadlock) -// other than that it does not return until all messages in the store have been sent (connect() does not complete its -// token before this completes) -func (c *client) resume(subscription bool, ibound chan packets.ControlPacket) { - DEBUG.Println(STR, "enter Resume") - - // Prior to sending a message getSemaphore will be called and once sent releaseSemaphore will be called - // with the token (so semaphore can be released when ACK received if applicable). 
- // Using a weighted semaphore rather than channels because this retains ordering - getSemaphore := func() {} // Default = do nothing - releaseSemaphore := func(_ *PublishToken) {} // Default = do nothing - var sem *semaphore.Weighted - if c.options.MaxResumePubInFlight > 0 { - sem = semaphore.NewWeighted(int64(c.options.MaxResumePubInFlight)) - ctx, cancel := context.WithCancel(context.Background()) // Context needed for semaphore - defer cancel() // ensure context gets cancelled - - go func() { - select { - case <-c.stop: // Request to stop (due to comm error etc) - cancel() - case <-ctx.Done(): // resume completed normally - } - }() - - getSemaphore = func() { sem.Acquire(ctx, 1) } - releaseSemaphore = func(token *PublishToken) { // Note: If token never completes then resume() may stall (will still exit on ctx.Done()) - go func() { - select { - case <-token.Done(): - case <-ctx.Done(): - } - sem.Release(1) - }() - } - } - - storedKeys := c.persist.All() - for _, key := range storedKeys { - packet := c.persist.Get(key) - if packet == nil { - DEBUG.Println(STR, fmt.Sprintf("resume found NIL packet (%s)", key)) - continue - } - details := packet.Details() - if isKeyOutbound(key) { - switch p := packet.(type) { - case *packets.SubscribePacket: - if subscription { - DEBUG.Println(STR, fmt.Sprintf("loaded pending subscribe (%d)", details.MessageID)) - subPacket := packet.(*packets.SubscribePacket) - token := newToken(packets.Subscribe).(*SubscribeToken) - token.messageID = details.MessageID - token.subs = append(token.subs, subPacket.Topics...) 
- c.claimID(token, details.MessageID) - select { - case c.oboundP <- &PacketAndToken{p: packet, t: token}: - case <-c.stop: - DEBUG.Println(STR, "resume exiting due to stop") - return - } - } else { - c.persist.Del(key) // Unsubscribe packets should not be retained following a reconnect - } - case *packets.UnsubscribePacket: - if subscription { - DEBUG.Println(STR, fmt.Sprintf("loaded pending unsubscribe (%d)", details.MessageID)) - token := newToken(packets.Unsubscribe).(*UnsubscribeToken) - select { - case c.oboundP <- &PacketAndToken{p: packet, t: token}: - case <-c.stop: - DEBUG.Println(STR, "resume exiting due to stop") - return - } - } else { - c.persist.Del(key) // Unsubscribe packets should not be retained following a reconnect - } - case *packets.PubrelPacket: - DEBUG.Println(STR, fmt.Sprintf("loaded pending pubrel (%d)", details.MessageID)) - select { - case c.oboundP <- &PacketAndToken{p: packet, t: nil}: - case <-c.stop: - DEBUG.Println(STR, "resume exiting due to stop") - return - } - case *packets.PublishPacket: - // spec: If the DUP flag is set to 0, it indicates that this is the first occasion that the Client or - // Server has attempted to send this MQTT PUBLISH Packet. If the DUP flag is set to 1, it indicates that - // this might be re-delivery of an earlier attempt to send the Packet. - // - // If the message is in the store than an attempt at delivery has been made (note that the message may - // never have made it onto the wire but tracking that would be complicated!). 
- if p.Qos != 0 { // spec: The DUP flag MUST be set to 0 for all QoS 0 messages - p.Dup = true - } - token := newToken(packets.Publish).(*PublishToken) - token.messageID = details.MessageID - c.claimID(token, details.MessageID) - DEBUG.Println(STR, fmt.Sprintf("loaded pending publish (%d)", details.MessageID)) - DEBUG.Println(STR, details) - getSemaphore() - select { - case c.obound <- &PacketAndToken{p: p, t: token}: - case <-c.stop: - DEBUG.Println(STR, "resume exiting due to stop") - return - } - releaseSemaphore(token) // If limiting simultaneous messages then we need to know when message is acknowledged - default: - ERROR.Println(STR, fmt.Sprintf("invalid message type (inbound - %T) in store (discarded)", packet)) - c.persist.Del(key) - } - } else { - switch packet.(type) { - case *packets.PubrelPacket: - DEBUG.Println(STR, fmt.Sprintf("loaded pending incomming (%d)", details.MessageID)) - select { - case ibound <- packet: - case <-c.stop: - DEBUG.Println(STR, "resume exiting due to stop (ibound <- packet)") - return - } - default: - ERROR.Println(STR, fmt.Sprintf("invalid message type (%T) in store (discarded)", packet)) - c.persist.Del(key) - } - } - } - DEBUG.Println(STR, "exit resume") -} - -// Unsubscribe will end the subscription from each of the topics provided. -// Messages published to those topics from other clients will no longer be -// received. 
-func (c *client) Unsubscribe(topics ...string) Token { - token := newToken(packets.Unsubscribe).(*UnsubscribeToken) - DEBUG.Println(CLI, "enter Unsubscribe") - if !c.IsConnected() { - token.setError(ErrNotConnected) - return token - } - if !c.IsConnectionOpen() { - switch { - case !c.options.ResumeSubs: - // if not connected and resumeSubs not set this unsub will be thrown away - token.setError(fmt.Errorf("not currently connected and ResumeSubs not set")) - return token - case c.options.CleanSession && c.status.ConnectionStatus() == reconnecting: - // if reconnecting and cleanSession is true this unsub will be thrown away - token.setError(fmt.Errorf("reconnecting state and cleansession is true")) - return token - } - } - unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) - unsub.Topics = make([]string, len(topics)) - copy(unsub.Topics, topics) - - if unsub.MessageID == 0 { - mID := c.getID(token) - if mID == 0 { - token.setError(fmt.Errorf("no message IDs available")) - return token - } - unsub.MessageID = mID - token.messageID = mID - } - - if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection - persistOutbound(c.persist, unsub) - } - - switch c.status.ConnectionStatus() { - case connecting: - DEBUG.Println(CLI, "storing unsubscribe message (connecting), topics:", topics) - case reconnecting: - DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics) - case disconnecting: - DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics) - default: - DEBUG.Println(CLI, "sending unsubscribe message, topics:", topics) - subscribeWaitTimeout := c.options.WriteTimeout - if subscribeWaitTimeout == 0 { - subscribeWaitTimeout = time.Second * 30 - } - select { - case c.oboundP <- &PacketAndToken{p: unsub, t: token}: - for _, topic := range topics { - c.msgRouter.deleteRoute(topic) - } - case <-time.After(subscribeWaitTimeout): - 
token.setError(errors.New("unsubscribe was broken by timeout")) - } - } - - DEBUG.Println(CLI, "exit Unsubscribe") - return token -} - -// OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions -// in use by the client. -func (c *client) OptionsReader() ClientOptionsReader { - r := ClientOptionsReader{options: &c.options} - return r -} - -// DefaultConnectionLostHandler is a definition of a function that simply -// reports to the DEBUG log the reason for the client losing a connection. -func DefaultConnectionLostHandler(client Client, reason error) { - DEBUG.Println("Connection lost:", reason.Error()) -} - -// UpdateLastReceived - Will be called whenever a packet is received off the network -// This is used by the keepalive routine to -func (c *client) UpdateLastReceived() { - if c.options.KeepAlive != 0 { - c.lastReceived.Store(time.Now()) - } -} - -// UpdateLastReceived - Will be called whenever a packet is successfully transmitted to the network -func (c *client) UpdateLastSent() { - if c.options.KeepAlive != 0 { - c.lastSent.Store(time.Now()) - } -} - -// getWriteTimeOut returns the writetimeout (duration to wait when writing to the connection) or 0 if none -func (c *client) getWriteTimeOut() time.Duration { - return c.options.WriteTimeout -} - -// persistOutbound adds the packet to the outbound store -func (c *client) persistOutbound(m packets.ControlPacket) { - persistOutbound(c.persist, m) -} - -// persistInbound adds the packet to the inbound store -func (c *client) persistInbound(m packets.ControlPacket) { - persistInbound(c.persist, m) -} - -// pingRespReceived will be called by the network routines when a ping response is received -func (c *client) pingRespReceived() { - atomic.StoreInt32(&c.pingOutstanding, 0) -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/components.go b/vendor/github.com/eclipse/paho.mqtt.golang/components.go deleted file mode 100644 index 524db031..00000000 --- 
a/vendor/github.com/eclipse/paho.mqtt.golang/components.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -type component string - -// Component names for debug output -const ( - NET component = "[net] " - PNG component = "[pinger] " - CLI component = "[client] " - DEC component = "[decode] " - MES component = "[message] " - STR component = "[store] " - MID component = "[msgids] " - TST component = "[test] " - STA component = "[state] " - ERR component = "[error] " - ROU component = "[router] " -) diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 deleted file mode 100644 index cf989f14..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 +++ /dev/null @@ -1,15 +0,0 @@ - -Eclipse Distribution License - v 1.0 - -Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Eclipse Foundation, Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v20 b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v20 deleted file mode 100644 index e55f3446..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v20 +++ /dev/null @@ -1,277 +0,0 @@ -Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. 
- Contributions do not include changes or additions to the Program that - are not Modified Works. - -"Contributor" means any person or entity that Distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which -are necessarily infringed by the use or sale of its Contribution alone -or when combined with the Program. - -"Program" means the Contributions Distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement -or any Secondary License (as applicable), including Contributors. - -"Derivative Works" shall mean any work, whether in Source Code or other -form, that is based on (or derived from) the Program and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. - -"Modified Works" shall mean any work in Source Code or other form that -results from an addition to, deletion from, or modification of the -contents of the Program, including, for purposes of clarity any new file -in Source Code form that contains any contents of the Program. Modified -Works shall not include works that contain only declarations, -interfaces, types, classes, structures, or files of the Program solely -in each case in order to link to, bind by name, or subclass the Program -or Modified Works thereof. - -"Distribute" means the acts of a) distributing or b) making available -in any manner that enables the transfer of a copy. - -"Source Code" means the form of a Program preferred for making -modifications, including but not limited to software source code, -documentation source, and configuration files. - -"Secondary License" means either the GNU General Public License, -Version 2.0, or any later versions of that license, including any -exceptions or additional permissions as identified by the initial -Contributor. - -2. 
GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. - - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. - Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. 
- - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. - - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - -3. REQUIREMENTS - -3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of this section 3. 
- -3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - -3.3 Contributors may not remove or alter any copyright, patent, -trademark, attribution notices, disclaimers of warranty, or limitations -of liability ("notices") contained within the Program from any copy of -the Program which they Distribute, provided that Contributors may add -their own appropriate notices. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities -with respect to end users, business partners and the like. While this -license is intended to facilitate the commercial use of the Program, -the Contributor who includes the Program in a commercial product -offering should do so in a manner which does not create potential -liability for other Contributors. Therefore, if a Contributor includes -the Program in a commercial product offering, such Contributor -("Commercial Contributor") hereby agrees to defend and indemnify every -other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits -and other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such -Commercial Contributor in connection with its distribution of the Program -in a commercial product offering. The obligations in this section do not -apply to any claims or Losses relating to any actual or alleged -intellectual property infringement. 
In order to qualify, an Indemnified -Contributor must: a) promptly notify the Commercial Contributor in -writing of such claim, and b) allow the Commercial Contributor to control, -and cooperate with the Commercial Contributor in, the defense and any -related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial -product offering, Product X. That Contributor is then a Commercial -Contributor. If that Commercial Contributor then makes performance -claims, or offers warranties related to Product X, those performance -claims and warranties are such Commercial Contributor's responsibility -alone. Under this section, the Commercial Contributor would have to -defend claims against the other Contributors related to those performance -claims and warranties, and if a court requires any other Contributor to -pay any damages as a result, the Commercial Contributor must pay -those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR -PURPOSE. Each Recipient is solely responsible for determining the -appropriateness of using and distributing the Program and assumes all -risks associated with its exercise of rights under this Agreement, -including but not limited to the risks and costs of program errors, -compliance with applicable laws, damage to or loss of data, programs -or equipment, and unavailability or interruption of operations. - -6. 
DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS -SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of -the remainder of the terms of this Agreement, and without further -action by the parties hereto, such provision shall be reformed to the -minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity -(including a cross-claim or counterclaim in a lawsuit) alleging that the -Program itself (excluding combinations of the Program with other software -or hardware) infringes such Recipient's patent(s), then such Recipient's -rights granted under Section 2(b) shall terminate as of the date such -litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it -fails to comply with any of the material terms or conditions of this -Agreement and does not cure such failure in a reasonable period of -time after becoming aware of such noncompliance. If all Recipient's -rights under this Agreement terminate, Recipient agrees to cease use -and distribution of the Program as soon as reasonably practicable. -However, Recipient's obligations under this Agreement and any licenses -granted by Recipient relating to the Program shall continue and survive. 
- -Everyone is permitted to copy and distribute copies of this Agreement, -but in order to avoid inconsistency the Agreement is copyrighted and -may only be modified in the following manner. The Agreement Steward -reserves the right to publish new versions (including revisions) of -this Agreement from time to time. No one other than the Agreement -Steward has the right to modify this Agreement. The Eclipse Foundation -is the initial Agreement Steward. The Eclipse Foundation may assign the -responsibility to serve as the Agreement Steward to a suitable separate -entity. Each new version of the Agreement will be given a distinguishing -version number. The Program (including Contributions) may always be -Distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to Distribute the Program (including its -Contributions) under the new version. - -Except as expressly stated in Sections 2(a) and 2(b) above, Recipient -receives no rights or licenses to the intellectual property of any -Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. All rights in the Program not expressly granted -under this Agreement are reserved. Nothing in this Agreement is intended -to be enforceable by any entity that is not a Contributor or Recipient. -No third-party beneficiary rights are created under this Agreement. - -Exhibit A - Form of Secondary Licenses Notice - -"This Source Code may also be made available under the following -Secondary Licenses when the conditions for such availability set forth -in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), -version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. 
- - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. \ No newline at end of file diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go deleted file mode 100644 index fcfaa12e..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "io/ioutil" - "os" - "path" - "sort" - "sync" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -const ( - msgExt = ".msg" - tmpExt = ".tmp" - corruptExt = ".CORRUPT" -) - -// FileStore implements the store interface using the filesystem to provide -// true persistence, even across client failure. This is designed to use a -// single directory per running client. If you are running multiple clients -// on the same filesystem, you will need to be careful to specify unique -// store directories for each. -type FileStore struct { - sync.RWMutex - directory string - opened bool -} - -// NewFileStore will create a new FileStore which stores its messages in the -// directory provided. 
-func NewFileStore(directory string) *FileStore { - store := &FileStore{ - directory: directory, - opened: false, - } - return store -} - -// Open will allow the FileStore to be used. -func (store *FileStore) Open() { - store.Lock() - defer store.Unlock() - // if no store directory was specified in ClientOpts, by default use the - // current working directory - if store.directory == "" { - store.directory, _ = os.Getwd() - } - - // if store dir exists, great, otherwise, create it - if !exists(store.directory) { - perms := os.FileMode(0770) - merr := os.MkdirAll(store.directory, perms) - chkerr(merr) - } - store.opened = true - DEBUG.Println(STR, "store is opened at", store.directory) -} - -// Close will disallow the FileStore from being used. -func (store *FileStore) Close() { - store.Lock() - defer store.Unlock() - store.opened = false - DEBUG.Println(STR, "store is closed") -} - -// Put will put a message into the store, associated with the provided -// key value. -func (store *FileStore) Put(key string, m packets.ControlPacket) { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to use file store, but not open") - return - } - full := fullpath(store.directory, key) - write(store.directory, key, m) - if !exists(full) { - ERROR.Println(STR, "file not created:", full) - } -} - -// Get will retrieve a message from the store, the one associated with -// the provided key value. 
-func (store *FileStore) Get(key string) packets.ControlPacket { - store.RLock() - defer store.RUnlock() - if !store.opened { - ERROR.Println(STR, "trying to use file store, but not open") - return nil - } - filepath := fullpath(store.directory, key) - if !exists(filepath) { - return nil - } - mfile, oerr := os.Open(filepath) - chkerr(oerr) - msg, rerr := packets.ReadPacket(mfile) - chkerr(mfile.Close()) - - // Message was unreadable, return nil - if rerr != nil { - newpath := corruptpath(store.directory, key) - WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath) - if err := os.Rename(filepath, newpath); err != nil { - ERROR.Println(STR, err) - } - return nil - } - return msg -} - -// All will provide a list of all of the keys associated with messages -// currently residing in the FileStore. -func (store *FileStore) All() []string { - store.RLock() - defer store.RUnlock() - return store.all() -} - -// Del will remove the persisted message associated with the provided -// key from the FileStore. -func (store *FileStore) Del(key string) { - store.Lock() - defer store.Unlock() - store.del(key) -} - -// Reset will remove all persisted messages from the FileStore. 
-func (store *FileStore) Reset() { - store.Lock() - defer store.Unlock() - WARN.Println(STR, "FileStore Reset") - for _, key := range store.all() { - store.del(key) - } -} - -// lockless -func (store *FileStore) all() []string { - var err error - var keys []string - var files fileInfos - - if !store.opened { - ERROR.Println(STR, "trying to use file store, but not open") - return nil - } - - files, err = ioutil.ReadDir(store.directory) - chkerr(err) - sort.Sort(files) - for _, f := range files { - DEBUG.Println(STR, "file in All():", f.Name()) - name := f.Name() - if len(name) < len(msgExt) || name[len(name)-len(msgExt):] != msgExt { - DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name) - continue - } - key := name[0 : len(name)-4] // remove file extension - keys = append(keys, key) - } - return keys -} - -// lockless -func (store *FileStore) del(key string) { - if !store.opened { - ERROR.Println(STR, "trying to use file store, but not open") - return - } - DEBUG.Println(STR, "store del filepath:", store.directory) - DEBUG.Println(STR, "store delete key:", key) - filepath := fullpath(store.directory, key) - DEBUG.Println(STR, "path of deletion:", filepath) - if !exists(filepath) { - WARN.Println(STR, "store could not delete key:", key) - return - } - rerr := os.Remove(filepath) - chkerr(rerr) - DEBUG.Println(STR, "del msg:", key) - if exists(filepath) { - ERROR.Println(STR, "file not deleted:", filepath) - } -} - -func fullpath(store string, key string) string { - p := path.Join(store, key+msgExt) - return p -} - -func tmppath(store string, key string) string { - p := path.Join(store, key+tmpExt) - return p -} - -func corruptpath(store string, key string) string { - p := path.Join(store, key+corruptExt) - return p -} - -// create file called "X.[messageid].tmp" located in the store -// the contents of the file is the bytes of the message, then -// rename it to "X.[messageid].msg", overwriting any existing -// message with the same id -// X will 
be 'i' for inbound messages, and O for outbound messages -func write(store, key string, m packets.ControlPacket) { - temppath := tmppath(store, key) - f, err := os.Create(temppath) - chkerr(err) - werr := m.Write(f) - chkerr(werr) - cerr := f.Close() - chkerr(cerr) - rerr := os.Rename(temppath, fullpath(store, key)) - chkerr(rerr) -} - -func exists(file string) bool { - if _, err := os.Stat(file); err != nil { - if os.IsNotExist(err) { - return false - } - chkerr(err) - } - return true -} - -type fileInfos []os.FileInfo - -func (f fileInfos) Len() int { - return len(f) -} - -func (f fileInfos) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -func (f fileInfos) Less(i, j int) bool { - return f[i].ModTime().Before(f[j].ModTime()) -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go deleted file mode 100644 index e9f80882..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "sync" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// MemoryStore implements the store interface to provide a "persistence" -// mechanism wholly stored in memory. This is only useful for -// as long as the client instance exists. 
-type MemoryStore struct { - sync.RWMutex - messages map[string]packets.ControlPacket - opened bool -} - -// NewMemoryStore returns a pointer to a new instance of -// MemoryStore, the instance is not initialized and ready to -// use until Open() has been called on it. -func NewMemoryStore() *MemoryStore { - store := &MemoryStore{ - messages: make(map[string]packets.ControlPacket), - opened: false, - } - return store -} - -// Open initializes a MemoryStore instance. -func (store *MemoryStore) Open() { - store.Lock() - defer store.Unlock() - store.opened = true - DEBUG.Println(STR, "memorystore initialized") -} - -// Put takes a key and a pointer to a Message and stores the -// message. -func (store *MemoryStore) Put(key string, message packets.ControlPacket) { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return - } - store.messages[key] = message -} - -// Get takes a key and looks in the store for a matching Message -// returning either the Message pointer or nil. -func (store *MemoryStore) Get(key string) packets.ControlPacket { - store.RLock() - defer store.RUnlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return nil - } - mid := mIDFromKey(key) - m := store.messages[key] - if m == nil { - CRITICAL.Println(STR, "memorystore get: message", mid, "not found") - } else { - DEBUG.Println(STR, "memorystore get: message", mid, "found") - } - return m -} - -// All returns a slice of strings containing all the keys currently -// in the MemoryStore. 
-func (store *MemoryStore) All() []string { - store.RLock() - defer store.RUnlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return nil - } - var keys []string - for k := range store.messages { - keys = append(keys, k) - } - return keys -} - -// Del takes a key, searches the MemoryStore and if the key is found -// deletes the Message pointer associated with it. -func (store *MemoryStore) Del(key string) { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return - } - mid := mIDFromKey(key) - m := store.messages[key] - if m == nil { - WARN.Println(STR, "memorystore del: message", mid, "not found") - } else { - delete(store.messages, key) - DEBUG.Println(STR, "memorystore del: message", mid, "was deleted") - } -} - -// Close will disallow modifications to the state of the store. -func (store *MemoryStore) Close() { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to close memory store, but not open") - return - } - store.opened = false - DEBUG.Println(STR, "memorystore closed") -} - -// Reset eliminates all persisted message data in the store. -func (store *MemoryStore) Reset() { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to reset memory store, but not open") - } - store.messages = make(map[string]packets.ControlPacket) - WARN.Println(STR, "memorystore wiped") -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore_ordered.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore_ordered.go deleted file mode 100644 index 498b82b8..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/memstore_ordered.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * Matt Brittan - */ - -package mqtt - -import ( - "sort" - "sync" - "time" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// OrderedMemoryStore uses a map internally so the order in which All() returns packets is -// undefined. OrderedMemoryStore resolves this by storing the time the message is added -// and sorting based upon this. - -// storedMessage encapsulates a message and the time it was initially stored -type storedMessage struct { - ts time.Time - msg packets.ControlPacket -} - -// OrderedMemoryStore implements the store interface to provide a "persistence" -// mechanism wholly stored in memory. This is only useful for -// as long as the client instance exists. -type OrderedMemoryStore struct { - sync.RWMutex - messages map[string]storedMessage - opened bool -} - -// NewOrderedMemoryStore returns a pointer to a new instance of -// OrderedMemoryStore, the instance is not initialized and ready to -// use until Open() has been called on it. -func NewOrderedMemoryStore() *OrderedMemoryStore { - store := &OrderedMemoryStore{ - messages: make(map[string]storedMessage), - opened: false, - } - return store -} - -// Open initializes a OrderedMemoryStore instance. -func (store *OrderedMemoryStore) Open() { - store.Lock() - defer store.Unlock() - store.opened = true - DEBUG.Println(STR, "OrderedMemoryStore initialized") -} - -// Put takes a key and a pointer to a Message and stores the -// message. 
-func (store *OrderedMemoryStore) Put(key string, message packets.ControlPacket) { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return - } - store.messages[key] = storedMessage{ts: time.Now(), msg: message} -} - -// Get takes a key and looks in the store for a matching Message -// returning either the Message pointer or nil. -func (store *OrderedMemoryStore) Get(key string) packets.ControlPacket { - store.RLock() - defer store.RUnlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return nil - } - mid := mIDFromKey(key) - m, ok := store.messages[key] - if !ok || m.msg == nil { - CRITICAL.Println(STR, "OrderedMemoryStore get: message", mid, "not found") - } else { - DEBUG.Println(STR, "OrderedMemoryStore get: message", mid, "found") - } - return m.msg -} - -// All returns a slice of strings containing all the keys currently -// in the OrderedMemoryStore. -func (store *OrderedMemoryStore) All() []string { - store.RLock() - defer store.RUnlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return nil - } - type tsAndKey struct { - ts time.Time - key string - } - - tsKeys := make([]tsAndKey, 0, len(store.messages)) - for k, v := range store.messages { - tsKeys = append(tsKeys, tsAndKey{ts: v.ts, key: k}) - } - sort.Slice(tsKeys, func(a int, b int) bool { return tsKeys[a].ts.Before(tsKeys[b].ts) }) - - keys := make([]string, len(tsKeys)) - for i := range tsKeys { - keys[i] = tsKeys[i].key - } - return keys -} - -// Del takes a key, searches the OrderedMemoryStore and if the key is found -// deletes the Message pointer associated with it. 
-func (store *OrderedMemoryStore) Del(key string) { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to use memory store, but not open") - return - } - mid := mIDFromKey(key) - _, ok := store.messages[key] - if !ok { - WARN.Println(STR, "OrderedMemoryStore del: message", mid, "not found") - } else { - delete(store.messages, key) - DEBUG.Println(STR, "OrderedMemoryStore del: message", mid, "was deleted") - } -} - -// Close will disallow modifications to the state of the store. -func (store *OrderedMemoryStore) Close() { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to close memory store, but not open") - return - } - store.opened = false - DEBUG.Println(STR, "OrderedMemoryStore closed") -} - -// Reset eliminates all persisted message data in the store. -func (store *OrderedMemoryStore) Reset() { - store.Lock() - defer store.Unlock() - if !store.opened { - ERROR.Println(STR, "Trying to reset memory store, but not open") - } - store.messages = make(map[string]storedMessage) - WARN.Println(STR, "OrderedMemoryStore wiped") -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/message.go b/vendor/github.com/eclipse/paho.mqtt.golang/message.go deleted file mode 100644 index 35b463f8..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/message.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "net/url" - "sync" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// Message defines the externals that a message implementation must support -// these are received messages that are passed to the callbacks, not internal -// messages -type Message interface { - Duplicate() bool - Qos() byte - Retained() bool - Topic() string - MessageID() uint16 - Payload() []byte - Ack() -} - -type message struct { - duplicate bool - qos byte - retained bool - topic string - messageID uint16 - payload []byte - once sync.Once - ack func() -} - -func (m *message) Duplicate() bool { - return m.duplicate -} - -func (m *message) Qos() byte { - return m.qos -} - -func (m *message) Retained() bool { - return m.retained -} - -func (m *message) Topic() string { - return m.topic -} - -func (m *message) MessageID() uint16 { - return m.messageID -} - -func (m *message) Payload() []byte { - return m.payload -} - -func (m *message) Ack() { - m.once.Do(m.ack) -} - -func messageFromPublish(p *packets.PublishPacket, ack func()) Message { - return &message{ - duplicate: p.Dup, - qos: p.Qos, - retained: p.Retain, - topic: p.TopicName, - messageID: p.MessageID, - payload: p.Payload, - ack: ack, - } -} - -func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket { - m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) - - m.CleanSession = options.CleanSession - m.WillFlag = options.WillEnabled - m.WillRetain = options.WillRetained - m.ClientIdentifier = options.ClientID - - if options.WillEnabled { - m.WillQos = options.WillQos - m.WillTopic = options.WillTopic - m.WillMessage = options.WillPayload - } - - username := options.Username - password := options.Password - if broker.User != nil { - username = broker.User.Username() - if pwd, ok := broker.User.Password(); ok { - password = pwd - } - } - if 
options.CredentialsProvider != nil { - username, password = options.CredentialsProvider() - } - - if username != "" { - m.UsernameFlag = true - m.Username = username - // mustn't have password without user as well - if password != "" { - m.PasswordFlag = true - m.Password = []byte(password) - } - } - - m.Keepalive = uint16(options.KeepAlive) - - return m -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go deleted file mode 100644 index 04c94bd3..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * Matt Brittan - */ - -package mqtt - -import ( - "fmt" - "sync" - "time" -) - -// MId is 16 bit message id as specified by the MQTT spec. -// In general, these values should not be depended upon by -// the client application. -type MId uint16 - -type messageIds struct { - mu sync.RWMutex // Named to prevent Mu from being accessible directly via client - index map[uint16]tokenCompletor - - lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier) -} - -const ( - midMin uint16 = 1 - midMax uint16 = 65535 -) - -// cleanup clears the message ID map; completes all token types and sets error on PUB, SUB and UNSUB tokens. 
-func (mids *messageIds) cleanUp() { - mids.mu.Lock() - for _, token := range mids.index { - switch token.(type) { - case *PublishToken: - token.setError(fmt.Errorf("connection lost before Publish completed")) - case *SubscribeToken: - token.setError(fmt.Errorf("connection lost before Subscribe completed")) - case *UnsubscribeToken: - token.setError(fmt.Errorf("connection lost before Unsubscribe completed")) - case nil: // should not be any nil entries - continue - } - token.flowComplete() - } - mids.index = make(map[uint16]tokenCompletor) - mids.mu.Unlock() - DEBUG.Println(MID, "cleaned up") -} - -// cleanUpSubscribe removes all SUBSCRIBE and UNSUBSCRIBE tokens (setting error) -// This may be called when the connection is lost, and we will not be resending SUB/UNSUB packets -func (mids *messageIds) cleanUpSubscribe() { - mids.mu.Lock() - for mid, token := range mids.index { - switch token.(type) { - case *SubscribeToken: - token.setError(fmt.Errorf("connection lost before Subscribe completed")) - delete(mids.index, mid) - case *UnsubscribeToken: - token.setError(fmt.Errorf("connection lost before Unsubscribe completed")) - delete(mids.index, mid) - } - } - mids.mu.Unlock() - DEBUG.Println(MID, "cleaned up subs") -} - -func (mids *messageIds) freeID(id uint16) { - mids.mu.Lock() - delete(mids.index, id) - mids.mu.Unlock() -} - -func (mids *messageIds) claimID(token tokenCompletor, id uint16) { - mids.mu.Lock() - defer mids.mu.Unlock() - if _, ok := mids.index[id]; !ok { - mids.index[id] = token - } else { - old := mids.index[id] - old.flowComplete() - mids.index[id] = token - } - if id > mids.lastIssuedID { - mids.lastIssuedID = id - } -} - -// getID will return an available id or 0 if none available -// The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler) -func (mids *messageIds) getID(t tokenCompletor) uint16 { - mids.mu.Lock() - defer mids.mu.Unlock() - i := mids.lastIssuedID // note: the only situation where 
lastIssuedID is 0 the map will be empty - looped := false // uint16 will loop from 65535->0 - for { - i++ - if i == 0 { // skip 0 because its not a valid id (Control Packets MUST contain a non-zero 16-bit Packet Identifier [MQTT-2.3.1-1]) - i++ - looped = true - } - if _, ok := mids.index[i]; !ok { - mids.index[i] = t - mids.lastIssuedID = i - return i - } - if (looped && i == mids.lastIssuedID) || (mids.lastIssuedID == 0 && i == midMax) { // lastIssuedID will be 0 at startup - return 0 // no free ids - } - } -} - -func (mids *messageIds) getToken(id uint16) tokenCompletor { - mids.mu.RLock() - defer mids.mu.RUnlock() - if token, ok := mids.index[id]; ok { - return token - } - return &DummyToken{id: id} -} - -type DummyToken struct { - id uint16 -} - -// Wait implements the Token Wait method. -func (d *DummyToken) Wait() bool { - return true -} - -// WaitTimeout implements the Token WaitTimeout method. -func (d *DummyToken) WaitTimeout(t time.Duration) bool { - return true -} - -// Done implements the Token Done method. -func (d *DummyToken) Done() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - return ch -} - -func (d *DummyToken) flowComplete() { - ERROR.Printf("A lookup for token %d returned nil\n", d.id) -} - -func (d *DummyToken) Error() error { - return nil -} - -func (d *DummyToken) setError(e error) {} - -// PlaceHolderToken does nothing and was implemented to allow a messageid to be reserved -// it differs from DummyToken in that calling flowComplete does not generate an error (it -// is expected that flowComplete will be called when the token is overwritten with a real token) -type PlaceHolderToken struct { - id uint16 -} - -// Wait implements the Token Wait method. -func (p *PlaceHolderToken) Wait() bool { - return true -} - -// WaitTimeout implements the Token WaitTimeout method. -func (p *PlaceHolderToken) WaitTimeout(t time.Duration) bool { - return true -} - -// Done implements the Token Done method. 
-func (p *PlaceHolderToken) Done() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - return ch -} - -func (p *PlaceHolderToken) flowComplete() { -} - -func (p *PlaceHolderToken) Error() error { - return nil -} - -func (p *PlaceHolderToken) setError(e error) {} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/net.go b/vendor/github.com/eclipse/paho.mqtt.golang/net.go deleted file mode 100644 index 10cc7dae..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/net.go +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * Matt Brittan - */ - -package mqtt - -import ( - "errors" - "io" - "net" - "reflect" - "strings" - "sync" - "time" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -const closedNetConnErrorText = "use of closed network connection" // error string for closed conn (https://golang.org/src/net/error_test.go) - -// ConnectMQTT takes a connected net.Conn and performs the initial MQTT handshake. Parameters are: -// conn - Connected net.Conn -// cm - Connect Packet with everything other than the protocol name/version populated (historical reasons) -// protocolVersion - The protocol version to attempt to connect with -// -// Note that, for backward compatibility, ConnectMQTT() suppresses the actual connection error (compare to connectMQTT()). 
-func ConnectMQTT(conn net.Conn, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool) { - rc, sessionPresent, _ := connectMQTT(conn, cm, protocolVersion) - return rc, sessionPresent -} - -func connectMQTT(conn io.ReadWriter, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool, error) { - switch protocolVersion { - case 3: - DEBUG.Println(CLI, "Using MQTT 3.1 protocol") - cm.ProtocolName = "MQIsdp" - cm.ProtocolVersion = 3 - case 0x83: - DEBUG.Println(CLI, "Using MQTT 3.1b protocol") - cm.ProtocolName = "MQIsdp" - cm.ProtocolVersion = 0x83 - case 0x84: - DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol") - cm.ProtocolName = "MQTT" - cm.ProtocolVersion = 0x84 - default: - DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") - cm.ProtocolName = "MQTT" - cm.ProtocolVersion = 4 - } - - if err := cm.Write(conn); err != nil { - ERROR.Println(CLI, err) - return packets.ErrNetworkError, false, err - } - - rc, sessionPresent, err := verifyCONNACK(conn) - return rc, sessionPresent, err -} - -// This function is only used for receiving a connack -// when the connection is first started. -// This prevents receiving incoming data while resume -// is in progress if clean session is false. -func verifyCONNACK(conn io.Reader) (byte, bool, error) { - DEBUG.Println(NET, "connect started") - - ca, err := packets.ReadPacket(conn) - if err != nil { - ERROR.Println(NET, "connect got error", err) - return packets.ErrNetworkError, false, err - } - - if ca == nil { - ERROR.Println(NET, "received nil packet") - return packets.ErrNetworkError, false, errors.New("nil CONNACK packet") - } - - msg, ok := ca.(*packets.ConnackPacket) - if !ok { - ERROR.Println(NET, "received msg that was not CONNACK") - return packets.ErrNetworkError, false, errors.New("non-CONNACK first packet received") - } - - DEBUG.Println(NET, "received connack") - return msg.ReturnCode, msg.SessionPresent, nil -} - -// inbound encapsulates the output from startIncoming. 
-// err - If != nil then an error has occurred -// cp - A control packet received over the network link -type inbound struct { - err error - cp packets.ControlPacket -} - -// startIncoming initiates a goroutine that reads incoming messages off the wire and sends them to the channel (returned). -// If there are any issues with the network connection then the returned channel will be closed and the goroutine will exit -// (so closing the connection will terminate the goroutine) -func startIncoming(conn io.Reader) <-chan inbound { - var err error - var cp packets.ControlPacket - ibound := make(chan inbound) - - DEBUG.Println(NET, "incoming started") - - go func() { - for { - if cp, err = packets.ReadPacket(conn); err != nil { - // We do not want to log the error if it is due to the network connection having been closed - // elsewhere (i.e. after sending DisconnectPacket). Detecting this situation is the subject of - // https://github.com/golang/go/issues/4373 - if !strings.Contains(err.Error(), closedNetConnErrorText) { - ibound <- inbound{err: err} - } - close(ibound) - DEBUG.Println(NET, "incoming complete") - return - } - DEBUG.Println(NET, "startIncoming Received Message") - ibound <- inbound{cp: cp} - } - }() - - return ibound -} - -// incomingComms encapsulates the possible output of the incomingComms routine. If err != nil then an error has occurred and -// the routine will have terminated; otherwise one of the other members should be non-nil -type incomingComms struct { - err error // If non-nil then there has been an error (ignore everything else) - outbound *PacketAndToken // Packet (with token) than needs to be sent out (e.g. an acknowledgement) - incomingPub *packets.PublishPacket // A new publish has been received; this will need to be passed on to our user -} - -// startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming -// messages. 
-// Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as -// everything in the store has been sent. -// Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed) -func startIncomingComms(conn io.Reader, - c commsFns, - inboundFromStore <-chan packets.ControlPacket, -) <-chan incomingComms { - ibound := startIncoming(conn) // Start goroutine that reads from network connection - output := make(chan incomingComms) - - DEBUG.Println(NET, "startIncomingComms started") - go func() { - for { - if inboundFromStore == nil && ibound == nil { - close(output) - DEBUG.Println(NET, "startIncomingComms goroutine complete") - return // As soon as ibound is closed we can exit (should have already processed an error) - } - DEBUG.Println(NET, "logic waiting for msg on ibound") - - var msg packets.ControlPacket - var ok bool - select { - case msg, ok = <-inboundFromStore: - if !ok { - DEBUG.Println(NET, "startIncomingComms: inboundFromStore complete") - inboundFromStore = nil // should happen quickly as this is only for persisted messages - continue - } - DEBUG.Println(NET, "startIncomingComms: got msg from store") - case ibMsg, ok := <-ibound: - if !ok { - DEBUG.Println(NET, "startIncomingComms: ibound complete") - ibound = nil - continue - } - DEBUG.Println(NET, "startIncomingComms: got msg on ibound") - // If the inbound comms routine encounters any issues it will send us an error. 
- if ibMsg.err != nil { - output <- incomingComms{err: ibMsg.err} - continue // Usually the channel will be closed immediately after sending an error but safer that we do not assume this - } - msg = ibMsg.cp - - c.persistInbound(msg) - c.UpdateLastReceived() // Notify keepalive logic that we recently received a packet - } - - switch m := msg.(type) { - case *packets.PingrespPacket: - DEBUG.Println(NET, "startIncomingComms: received pingresp") - c.pingRespReceived() - case *packets.SubackPacket: - DEBUG.Println(NET, "startIncomingComms: received suback, id:", m.MessageID) - token := c.getToken(m.MessageID) - - if t, ok := token.(*SubscribeToken); ok { - DEBUG.Println(NET, "startIncomingComms: granted qoss", m.ReturnCodes) - for i, qos := range m.ReturnCodes { - t.subResult[t.subs[i]] = qos - } - } - - token.flowComplete() - c.freeID(m.MessageID) - case *packets.UnsubackPacket: - DEBUG.Println(NET, "startIncomingComms: received unsuback, id:", m.MessageID) - c.getToken(m.MessageID).flowComplete() - c.freeID(m.MessageID) - case *packets.PublishPacket: - DEBUG.Println(NET, "startIncomingComms: received publish, msgId:", m.MessageID) - output <- incomingComms{incomingPub: m} - case *packets.PubackPacket: - DEBUG.Println(NET, "startIncomingComms: received puback, id:", m.MessageID) - c.getToken(m.MessageID).flowComplete() - c.freeID(m.MessageID) - case *packets.PubrecPacket: - DEBUG.Println(NET, "startIncomingComms: received pubrec, id:", m.MessageID) - prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) - prel.MessageID = m.MessageID - output <- incomingComms{outbound: &PacketAndToken{p: prel, t: nil}} - case *packets.PubrelPacket: - DEBUG.Println(NET, "startIncomingComms: received pubrel, id:", m.MessageID) - pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) - pc.MessageID = m.MessageID - c.persistOutbound(pc) - output <- incomingComms{outbound: &PacketAndToken{p: pc, t: nil}} - case *packets.PubcompPacket: - 
DEBUG.Println(NET, "startIncomingComms: received pubcomp, id:", m.MessageID) - c.getToken(m.MessageID).flowComplete() - c.freeID(m.MessageID) - } - } - }() - return output -} - -// startOutgoingComms initiates a go routine to transmit outgoing packets. -// Pass in an open network connection and channels for outbound messages (including those triggered -// directly from incoming comms). -// Returns a channel that will receive details of any errors (closed when the goroutine exits) -// This function wil only terminate when all input channels are closed -func startOutgoingComms(conn net.Conn, - c commsFns, - oboundp <-chan *PacketAndToken, - obound <-chan *PacketAndToken, - oboundFromIncoming <-chan *PacketAndToken, -) <-chan error { - errChan := make(chan error) - DEBUG.Println(NET, "outgoing started") - - go func() { - for { - DEBUG.Println(NET, "outgoing waiting for an outbound message") - - // This goroutine will only exits when all of the input channels we receive on have been closed. 
This approach is taken to avoid any - // deadlocks (if the connection goes down there are limited options as to what we can do with anything waiting on us and - // throwing away the packets seems the best option) - if oboundp == nil && obound == nil && oboundFromIncoming == nil { - DEBUG.Println(NET, "outgoing comms stopping") - close(errChan) - return - } - - select { - case pub, ok := <-obound: - if !ok { - obound = nil - continue - } - msg := pub.p.(*packets.PublishPacket) - DEBUG.Println(NET, "obound msg to write", msg.MessageID) - - writeTimeout := c.getWriteTimeOut() - if writeTimeout > 0 { - if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil { - ERROR.Println(NET, "SetWriteDeadline ", err) - } - } - - if err := msg.Write(conn); err != nil { - ERROR.Println(NET, "outgoing obound reporting error ", err) - pub.t.setError(err) - // report error if it's not due to the connection being closed elsewhere - if !strings.Contains(err.Error(), closedNetConnErrorText) { - errChan <- err - } - continue - } - - if writeTimeout > 0 { - // If we successfully wrote, we don't want the timeout to happen during an idle period - // so we reset it to infinite. 
- if err := conn.SetWriteDeadline(time.Time{}); err != nil { - ERROR.Println(NET, "SetWriteDeadline to 0 ", err) - } - } - - if msg.Qos == 0 { - pub.t.flowComplete() - } - DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID) - case msg, ok := <-oboundp: - if !ok { - oboundp = nil - continue - } - DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p)) - if err := msg.p.Write(conn); err != nil { - ERROR.Println(NET, "outgoing oboundp reporting error ", err) - if msg.t != nil { - msg.t.setError(err) - } - errChan <- err - continue - } - - if _, ok := msg.p.(*packets.DisconnectPacket); ok { - msg.t.(*DisconnectToken).flowComplete() - DEBUG.Println(NET, "outbound wrote disconnect, closing connection") - // As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection" - // Closing the connection will cause the goroutines to end in sequence (starting with incoming comms) - _ = conn.Close() - } - case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket) - if !ok { - oboundFromIncoming = nil - continue - } - DEBUG.Println(NET, "obound from incoming msg to write, type", reflect.TypeOf(msg.p), " ID ", msg.p.Details().MessageID) - if err := msg.p.Write(conn); err != nil { - ERROR.Println(NET, "outgoing oboundFromIncoming reporting error", err) - if msg.t != nil { - msg.t.setError(err) - } - errChan <- err - continue - } - } - c.UpdateLastSent() // Record that a packet has been received (for keepalive routine) - } - }() - return errChan -} - -// commsFns provide access to the client state (messageids, requesting disconnection and updating timing) -type commsFns interface { - getToken(id uint16) tokenCompletor // Retrieve the token for the specified messageid (if none then a dummy token must be returned) - freeID(id uint16) // Release the specified messageid (clearing out of any persistent store) - UpdateLastReceived() // Must be called whenever a packet 
is received - UpdateLastSent() // Must be called whenever a packet is successfully sent - getWriteTimeOut() time.Duration // Return the writetimeout (or 0 if none) - persistOutbound(m packets.ControlPacket) // add the packet to the outbound store - persistInbound(m packets.ControlPacket) // add the packet to the inbound store - pingRespReceived() // Called when a ping response is received -} - -// startComms initiates goroutines that handles communications over the network connection -// Messages will be stored (via commsFns) and deleted from the store as necessary -// It returns two channels: -// -// packets.PublishPacket - Will receive publish packets received over the network. -// Closed when incoming comms routines exit (on shutdown or if network link closed) -// error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down -// -// Note: The comms routines monitoring oboundp and obound will not shutdown until those channels are both closed. Any messages received between the -// connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is -// minimised. 
-func startComms(conn net.Conn, // Network connection (must be active) - c commsFns, // getters and setters to enable us to cleanly interact with client - inboundFromStore <-chan packets.ControlPacket, // Inbound packets from the persistence store (should be closed relatively soon after startup) - oboundp <-chan *PacketAndToken, - obound <-chan *PacketAndToken) ( - <-chan *packets.PublishPacket, // Publishpackages received over the network - <-chan error, // Any errors (should generally trigger a disconnect) -) { - // Start inbound comms handler; this needs to be able to transmit messages so we start a go routine to add these to the priority outbound channel - ibound := startIncomingComms(conn, c, inboundFromStore) - outboundFromIncoming := make(chan *PacketAndToken) // Will accept outgoing messages triggered by startIncomingComms (e.g. acknowledgements) - - // Start the outgoing handler. It is important to note that output from startIncomingComms is fed into startOutgoingComms (for ACK's) - oboundErr := startOutgoingComms(conn, c, oboundp, obound, outboundFromIncoming) - DEBUG.Println(NET, "startComms started") - - // Run up go routines to handle the output from the above comms functions - these are handled in separate - // go routines because they can interact (e.g. 
ibound triggers an ACK to obound which triggers an error) - var wg sync.WaitGroup - wg.Add(2) - - outPublish := make(chan *packets.PublishPacket) - outError := make(chan error) - - // Any messages received get passed to the appropriate channel - go func() { - for ic := range ibound { - if ic.err != nil { - outError <- ic.err - continue - } - if ic.outbound != nil { - outboundFromIncoming <- ic.outbound - continue - } - if ic.incomingPub != nil { - outPublish <- ic.incomingPub - continue - } - ERROR.Println(STR, "startComms received empty incomingComms msg") - } - // Close channels that will not be written to again (allowing other routines to exit) - close(outboundFromIncoming) - close(outPublish) - wg.Done() - }() - - // Any errors will be passed out to our caller - go func() { - for err := range oboundErr { - outError <- err - } - wg.Done() - }() - - // outError is used by both routines so can only be closed when they are both complete - go func() { - wg.Wait() - close(outError) - DEBUG.Println(NET, "startComms closing outError") - }() - - return outPublish, outError -} - -// ackFunc acknowledges a packet -// WARNING the function returned must not be called if the comms routine is shutting down or not running -// (it needs outgoing comms in order to send the acknowledgement). 
Currently this is only called from -// matchAndDispatch which will be shutdown before the comms are -func ackFunc(oboundP chan *PacketAndToken, persist Store, packet *packets.PublishPacket) func() { - return func() { - switch packet.Qos { - case 2: - pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) - pr.MessageID = packet.MessageID - DEBUG.Println(NET, "putting pubrec msg on obound") - oboundP <- &PacketAndToken{p: pr, t: nil} - DEBUG.Println(NET, "done putting pubrec msg on obound") - case 1: - pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) - pa.MessageID = packet.MessageID - DEBUG.Println(NET, "putting puback msg on obound") - persistOutbound(persist, pa) - oboundP <- &PacketAndToken{p: pa, t: nil} - DEBUG.Println(NET, "done putting puback msg on obound") - case 0: - // do nothing, since there is no need to send an ack packet back - } - } -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/netconn.go b/vendor/github.com/eclipse/paho.mqtt.golang/netconn.go deleted file mode 100644 index f5429e28..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/netconn.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * MAtt Brittan - */ - -package mqtt - -import ( - "crypto/tls" - "errors" - "net" - "net/http" - "net/url" - "os" - "time" - - "golang.org/x/net/proxy" -) - -// -// This just establishes the network connection; once established the type of connection should be irrelevant -// - -// openConnection opens a network connection using the protocol indicated in the URL. -// Does not carry out any MQTT specific handshakes. -func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions, dialer *net.Dialer) (net.Conn, error) { - switch uri.Scheme { - case "ws": - dialURI := *uri // #623 - Gorilla Websockets does not accept URL's where uri.User != nil - dialURI.User = nil - conn, err := NewWebsocket(dialURI.String(), nil, timeout, headers, websocketOptions) - return conn, err - case "wss": - dialURI := *uri // #623 - Gorilla Websockets does not accept URL's where uri.User != nil - dialURI.User = nil - conn, err := NewWebsocket(dialURI.String(), tlsc, timeout, headers, websocketOptions) - return conn, err - case "mqtt", "tcp": - allProxy := os.Getenv("all_proxy") - if len(allProxy) == 0 { - conn, err := dialer.Dial("tcp", uri.Host) - if err != nil { - return nil, err - } - return conn, nil - } - proxyDialer := proxy.FromEnvironment() - - conn, err := proxyDialer.Dial("tcp", uri.Host) - if err != nil { - return nil, err - } - return conn, nil - case "unix": - var conn net.Conn - var err error - - // this check is preserved for compatibility with older versions - // which used uri.Host only (it works for local paths, e.g. 
unix://socket.sock in current dir) - if len(uri.Host) > 0 { - conn, err = dialer.Dial("unix", uri.Host) - } else { - conn, err = dialer.Dial("unix", uri.Path) - } - - if err != nil { - return nil, err - } - return conn, nil - case "ssl", "tls", "mqtts", "mqtt+ssl", "tcps": - allProxy := os.Getenv("all_proxy") - if len(allProxy) == 0 { - conn, err := tls.DialWithDialer(dialer, "tcp", uri.Host, tlsc) - if err != nil { - return nil, err - } - return conn, nil - } - proxyDialer := proxy.FromEnvironment() - conn, err := proxyDialer.Dial("tcp", uri.Host) - if err != nil { - return nil, err - } - - tlsConn := tls.Client(conn, tlsc) - - err = tlsConn.Handshake() - if err != nil { - _ = conn.Close() - return nil, err - } - - return tlsConn, nil - } - return nil, errors.New("unknown protocol") -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go deleted file mode 100644 index c454aeba..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -func chkerr(e error) { - if e != nil { - panic(e) - } -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options.go b/vendor/github.com/eclipse/paho.mqtt.golang/options.go deleted file mode 100644 index 5aaa7d95..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/options.go +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * Måns Ansgariusson - */ - -// Portions copyright © 2018 TIBCO Software Inc. - -package mqtt - -import ( - "crypto/tls" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// CredentialsProvider allows the username and password to be updated -// before reconnecting. It should return the current username and password. -type CredentialsProvider func() (username string, password string) - -// MessageHandler is a callback type which can be set to be -// executed upon the arrival of messages published to topics -// to which the client is subscribed. -type MessageHandler func(Client, Message) - -// ConnectionLostHandler is a callback type which can be set to be -// executed upon an unintended disconnection from the MQTT broker. -// Disconnects caused by calling Disconnect or ForceDisconnect will -// not cause an OnConnectionLost callback to execute. 
-type ConnectionLostHandler func(Client, error) - -// OnConnectHandler is a callback that is called when the client -// state changes from unconnected/disconnected to connected. Both -// at initial connection and on reconnection -type OnConnectHandler func(Client) - -// ReconnectHandler is invoked prior to reconnecting after -// the initial connection is lost -type ReconnectHandler func(Client, *ClientOptions) - -// ConnectionAttemptHandler is invoked prior to making the initial connection. -type ConnectionAttemptHandler func(broker *url.URL, tlsCfg *tls.Config) *tls.Config - -// OpenConnectionFunc is invoked to establish the underlying network connection -// Its purpose if for custom network transports. -// Does not carry out any MQTT specific handshakes. -type OpenConnectionFunc func(uri *url.URL, options ClientOptions) (net.Conn, error) - -// ClientOptions contains configurable options for an Client. Note that these should be set using the -// relevant methods (e.g. AddBroker) rather than directly. See those functions for information on usage. -// WARNING: Create the below using NewClientOptions unless you have a compelling reason not to. It is easy -// to create a configuration with difficult to trace issues (e.g. Mosquitto 2.0.12+ will reject connections -// with KeepAlive=0 by default). -type ClientOptions struct { - Servers []*url.URL - ClientID string - Username string - Password string - CredentialsProvider CredentialsProvider - CleanSession bool - Order bool - WillEnabled bool - WillTopic string - WillPayload []byte - WillQos byte - WillRetained bool - ProtocolVersion uint - protocolVersionExplicit bool - TLSConfig *tls.Config - KeepAlive int64 // Warning: Some brokers may reject connections with Keepalive = 0. 
- PingTimeout time.Duration - ConnectTimeout time.Duration - MaxReconnectInterval time.Duration - AutoReconnect bool - ConnectRetryInterval time.Duration - ConnectRetry bool - Store Store - DefaultPublishHandler MessageHandler - OnConnect OnConnectHandler - OnConnectionLost ConnectionLostHandler - OnReconnecting ReconnectHandler - OnConnectAttempt ConnectionAttemptHandler - WriteTimeout time.Duration - MessageChannelDepth uint - ResumeSubs bool - HTTPHeaders http.Header - WebsocketOptions *WebsocketOptions - MaxResumePubInFlight int // // 0 = no limit; otherwise this is the maximum simultaneous messages sent while resuming - Dialer *net.Dialer - CustomOpenConnectionFn OpenConnectionFunc - AutoAckDisabled bool -} - -// NewClientOptions will create a new ClientClientOptions type with some -// default values. -// Port: 1883 -// CleanSession: True -// Order: True (note: it is recommended that this be set to FALSE unless order is important) -// KeepAlive: 30 (seconds) -// ConnectTimeout: 30 (seconds) -// MaxReconnectInterval 10 (minutes) -// AutoReconnect: True -func NewClientOptions() *ClientOptions { - o := &ClientOptions{ - Servers: nil, - ClientID: "", - Username: "", - Password: "", - CleanSession: true, - Order: true, - WillEnabled: false, - WillTopic: "", - WillPayload: nil, - WillQos: 0, - WillRetained: false, - ProtocolVersion: 0, - protocolVersionExplicit: false, - KeepAlive: 30, - PingTimeout: 10 * time.Second, - ConnectTimeout: 30 * time.Second, - MaxReconnectInterval: 10 * time.Minute, - AutoReconnect: true, - ConnectRetryInterval: 30 * time.Second, - ConnectRetry: false, - Store: nil, - OnConnect: nil, - OnConnectionLost: DefaultConnectionLostHandler, - OnConnectAttempt: nil, - WriteTimeout: 0, // 0 represents timeout disabled - ResumeSubs: false, - HTTPHeaders: make(map[string][]string), - WebsocketOptions: &WebsocketOptions{}, - Dialer: &net.Dialer{Timeout: 30 * time.Second}, - CustomOpenConnectionFn: nil, - AutoAckDisabled: false, - } - return o -} - 
-// AddBroker adds a broker URI to the list of brokers to be used. The format should be -// scheme://host:port -// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname) -// and "port" is the port on which the broker is accepting connections. -// -// Default values for hostname is "127.0.0.1", for schema is "tcp://". -// -// An example broker URI would look like: tcp://foobar.com:1883 -func (o *ClientOptions) AddBroker(server string) *ClientOptions { - if len(server) > 0 && server[0] == ':' { - server = "127.0.0.1" + server - } - if !strings.Contains(server, "://") { - server = "tcp://" + server - } - brokerURI, err := url.Parse(server) - if err != nil { - ERROR.Println(CLI, "Failed to parse %q broker address: %s", server, err) - return o - } - o.Servers = append(o.Servers, brokerURI) - return o -} - -// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting -// but not reconnecting if CleanSession is false. Otherwise these messages are discarded. -func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions { - o.ResumeSubs = resume - return o -} - -// SetClientID will set the client id to be used by this client when -// connecting to the MQTT broker. According to the MQTT v3.1 specification, -// a client id must be no longer than 23 characters. -func (o *ClientOptions) SetClientID(id string) *ClientOptions { - o.ClientID = id - return o -} - -// SetUsername will set the username to be used by this client when connecting -// to the MQTT broker. Note: without the use of SSL/TLS, this information will -// be sent in plaintext across the wire. -func (o *ClientOptions) SetUsername(u string) *ClientOptions { - o.Username = u - return o -} - -// SetPassword will set the password to be used by this client when connecting -// to the MQTT broker. Note: without the use of SSL/TLS, this information will -// be sent in plaintext across the wire. 
-func (o *ClientOptions) SetPassword(p string) *ClientOptions { - o.Password = p - return o -} - -// SetCredentialsProvider will set a method to be called by this client when -// connecting to the MQTT broker that provide the current username and password. -// Note: without the use of SSL/TLS, this information will be sent -// in plaintext across the wire. -func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions { - o.CredentialsProvider = p - return o -} - -// SetCleanSession will set the "clean session" flag in the connect message -// when this client connects to an MQTT broker. By setting this flag, you are -// indicating that no messages saved by the broker for this client should be -// delivered. Any messages that were going to be sent by this client before -// disconnecting previously but didn't will not be sent upon connecting to the -// broker. -func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions { - o.CleanSession = clean - return o -} - -// SetOrderMatters will set the message routing to guarantee order within -// each QoS level. By default, this value is true. If set to false (recommended), -// this flag indicates that messages can be delivered asynchronously -// from the client to the application and possibly arrive out of order. -// Specifically, the message handler is called in its own go routine. -// Note that setting this to true does not guarantee in-order delivery -// (this is subject to broker settings like "max_inflight_messages=1" in mosquitto) -// and if true then handlers must not block. -func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions { - o.Order = order - return o -} - -// SetTLSConfig will set an SSL/TLS configuration to be used when connecting -// to an MQTT broker. Please read the official Go documentation for more -// information. 
-func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions { - o.TLSConfig = t - return o -} - -// SetStore will set the implementation of the Store interface -// used to provide message persistence in cases where QoS levels -// QoS_ONE or QoS_TWO are used. If no store is provided, then the -// client will use MemoryStore by default. -func (o *ClientOptions) SetStore(s Store) *ClientOptions { - o.Store = s - return o -} - -// SetKeepAlive will set the amount of time (in seconds) that the client -// should wait before sending a PING request to the broker. This will -// allow the client to know that a connection has not been lost with the -// server. -func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions { - o.KeepAlive = int64(k / time.Second) - return o -} - -// SetPingTimeout will set the amount of time (in seconds) that the client -// will wait after sending a PING request to the broker, before deciding -// that the connection has been lost. Default is 10 seconds. -func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions { - o.PingTimeout = k - return o -} - -// SetProtocolVersion sets the MQTT version to be used to connect to the -// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1 -func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions { - if (pv >= 3 && pv <= 4) || (pv > 0x80) { - o.ProtocolVersion = pv - o.protocolVersionExplicit = true - } - return o -} - -// UnsetWill will cause any set will message to be disregarded. -func (o *ClientOptions) UnsetWill() *ClientOptions { - o.WillEnabled = false - return o -} - -// SetWill accepts a string will message to be set. When the client connects, -// it will give this will message to the broker, which will then publish the -// provided payload (the will) to any clients that are subscribed to the provided -// topic. 
-func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions { - o.SetBinaryWill(topic, []byte(payload), qos, retained) - return o -} - -// SetBinaryWill accepts a []byte will message to be set. When the client connects, -// it will give this will message to the broker, which will then publish the -// provided payload (the will) to any clients that are subscribed to the provided -// topic. -func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions { - o.WillEnabled = true - o.WillTopic = topic - o.WillPayload = payload - o.WillQos = qos - o.WillRetained = retained - return o -} - -// SetDefaultPublishHandler sets the MessageHandler that will be called when a message -// is received that does not match any known subscriptions. -// -// If OrderMatters is true (the defaultHandler) then callback must not block or -// call functions within this package that may block (e.g. Publish) other than in -// a new go routine. -// defaultHandler must be safe for concurrent use by multiple goroutines. -func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions { - o.DefaultPublishHandler = defaultHandler - return o -} - -// SetOnConnectHandler sets the function to be called when the client is connected. Both -// at initial connection time and upon automatic reconnect. -func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions { - o.OnConnect = onConn - return o -} - -// SetConnectionLostHandler will set the OnConnectionLost callback to be executed -// in the case where the client unexpectedly loses connection with the MQTT broker. -func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions { - o.OnConnectionLost = onLost - return o -} - -// SetReconnectingHandler sets the OnReconnecting callback to be executed prior -// to the client attempting a reconnect to the MQTT broker. 
-func (o *ClientOptions) SetReconnectingHandler(cb ReconnectHandler) *ClientOptions { - o.OnReconnecting = cb - return o -} - -// SetConnectionAttemptHandler sets the ConnectionAttemptHandler callback to be executed prior -// to each attempt to connect to an MQTT broker. Returns the *tls.Config that will be used when establishing -// the connection (a copy of the tls.Config from ClientOptions will be passed in along with the broker URL). -// This allows connection specific changes to be made to the *tls.Config. -func (o *ClientOptions) SetConnectionAttemptHandler(onConnectAttempt ConnectionAttemptHandler) *ClientOptions { - o.OnConnectAttempt = onConnectAttempt - return o -} - -// SetWriteTimeout puts a limit on how long a mqtt publish should block until it unblocks with a -// timeout error. A duration of 0 never times out. Default never times out -func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions { - o.WriteTimeout = t - return o -} - -// SetConnectTimeout limits how long the client will wait when trying to open a connection -// to an MQTT server before timing out. A duration of 0 never times out. -// Default 30 seconds. Currently only operational on TCP/TLS connections. 
-func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions { - o.ConnectTimeout = t - o.Dialer.Timeout = t - return o -} - -// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts -// when connection is lost -func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions { - o.MaxReconnectInterval = t - return o -} - -// SetAutoReconnect sets whether the automatic reconnection logic should be used -// when the connection is lost, even if disabled the ConnectionLostHandler is still -// called -func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions { - o.AutoReconnect = a - return o -} - -// SetConnectRetryInterval sets the time that will be waited between connection attempts -// when initially connecting if ConnectRetry is TRUE -func (o *ClientOptions) SetConnectRetryInterval(t time.Duration) *ClientOptions { - o.ConnectRetryInterval = t - return o -} - -// SetConnectRetry sets whether the connect function will automatically retry the connection -// in the event of a failure (when true the token returned by the Connect function will -// not complete until the connection is up or it is cancelled) -// If ConnectRetry is true then subscriptions should be requested in OnConnect handler -// Setting this to TRUE permits messages to be published before the connection is established -func (o *ClientOptions) SetConnectRetry(a bool) *ClientOptions { - o.ConnectRetry = a - return o -} - -// SetMessageChannelDepth DEPRECATED The value set here no longer has any effect, this function -// remains so the API is not altered. -func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions { - o.MessageChannelDepth = s - return o -} - -// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket -// opening handshake. 
-func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions { - o.HTTPHeaders = h - return o -} - -// SetWebsocketOptions sets the additional websocket options used in a WebSocket connection -func (o *ClientOptions) SetWebsocketOptions(w *WebsocketOptions) *ClientOptions { - o.WebsocketOptions = w - return o -} - -// SetMaxResumePubInFlight sets the maximum simultaneous publish messages that will be sent while resuming. Note that -// this only applies to messages coming from the store (so additional sends may push us over the limit) -// Note that the connect token will not be flagged as complete until all messages have been sent from the -// store. If broker does not respond to messages then resume may not complete. -// This option was put in place because resuming after downtime can saturate low capacity links. -func (o *ClientOptions) SetMaxResumePubInFlight(MaxResumePubInFlight int) *ClientOptions { - o.MaxResumePubInFlight = MaxResumePubInFlight - return o -} - -// SetDialer sets the tcp dialer options used in a tcp connection -func (o *ClientOptions) SetDialer(dialer *net.Dialer) *ClientOptions { - o.Dialer = dialer - return o -} - -// SetCustomOpenConnectionFn replaces the inbuilt function that establishes a network connection with a custom function. -// The passed in function should return an open `net.Conn` or an error (see the existing openConnection function for an example) -// It enables custom networking types in addition to the defaults (tcp, tls, websockets...) -func (o *ClientOptions) SetCustomOpenConnectionFn(customOpenConnectionFn OpenConnectionFunc) *ClientOptions { - if customOpenConnectionFn != nil { - o.CustomOpenConnectionFn = customOpenConnectionFn - } - return o -} - -// SetAutoAckDisabled enables or disables the Automated Acking of Messages received by the handler. -// By default it is set to false. Setting it to true will disable the auto-ack globally. 
-func (o *ClientOptions) SetAutoAckDisabled(autoAckDisabled bool) *ClientOptions { - o.AutoAckDisabled = autoAckDisabled - return o -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go deleted file mode 100644 index 10a9e49a..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "crypto/tls" - "net/http" - "net/url" - "time" -) - -// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized. 
-type ClientOptionsReader struct { - options *ClientOptions -} - -// Servers returns a slice of the servers defined in the clientoptions -func (r *ClientOptionsReader) Servers() []*url.URL { - s := make([]*url.URL, len(r.options.Servers)) - - for i, u := range r.options.Servers { - nu := *u - s[i] = &nu - } - - return s -} - -// ResumeSubs returns true if resuming stored (un)sub is enabled -func (r *ClientOptionsReader) ResumeSubs() bool { - s := r.options.ResumeSubs - return s -} - -// ClientID returns the set client id -func (r *ClientOptionsReader) ClientID() string { - s := r.options.ClientID - return s -} - -// Username returns the set username -func (r *ClientOptionsReader) Username() string { - s := r.options.Username - return s -} - -// Password returns the set password -func (r *ClientOptionsReader) Password() string { - s := r.options.Password - return s -} - -// CleanSession returns whether Cleansession is set -func (r *ClientOptionsReader) CleanSession() bool { - s := r.options.CleanSession - return s -} - -func (r *ClientOptionsReader) Order() bool { - s := r.options.Order - return s -} - -func (r *ClientOptionsReader) WillEnabled() bool { - s := r.options.WillEnabled - return s -} - -func (r *ClientOptionsReader) WillTopic() string { - s := r.options.WillTopic - return s -} - -func (r *ClientOptionsReader) WillPayload() []byte { - s := r.options.WillPayload - return s -} - -func (r *ClientOptionsReader) WillQos() byte { - s := r.options.WillQos - return s -} - -func (r *ClientOptionsReader) WillRetained() bool { - s := r.options.WillRetained - return s -} - -func (r *ClientOptionsReader) ProtocolVersion() uint { - s := r.options.ProtocolVersion - return s -} - -func (r *ClientOptionsReader) TLSConfig() *tls.Config { - s := r.options.TLSConfig - return s -} - -func (r *ClientOptionsReader) KeepAlive() time.Duration { - s := time.Duration(r.options.KeepAlive * int64(time.Second)) - return s -} - -func (r *ClientOptionsReader) PingTimeout() time.Duration 
{ - s := r.options.PingTimeout - return s -} - -func (r *ClientOptionsReader) ConnectTimeout() time.Duration { - s := r.options.ConnectTimeout - return s -} - -func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration { - s := r.options.MaxReconnectInterval - return s -} - -func (r *ClientOptionsReader) AutoReconnect() bool { - s := r.options.AutoReconnect - return s -} - -// ConnectRetryInterval returns the delay between retries on the initial connection (if ConnectRetry true) -func (r *ClientOptionsReader) ConnectRetryInterval() time.Duration { - s := r.options.ConnectRetryInterval - return s -} - -// ConnectRetry returns whether the initial connection request will be retried until connection established -func (r *ClientOptionsReader) ConnectRetry() bool { - s := r.options.ConnectRetry - return s -} - -func (r *ClientOptionsReader) WriteTimeout() time.Duration { - s := r.options.WriteTimeout - return s -} - -func (r *ClientOptionsReader) MessageChannelDepth() uint { - s := r.options.MessageChannelDepth - return s -} - -func (r *ClientOptionsReader) HTTPHeaders() http.Header { - h := r.options.HTTPHeaders - return h -} - -// WebsocketOptions returns the currently configured WebSocket options -func (r *ClientOptionsReader) WebsocketOptions() *WebsocketOptions { - s := r.options.WebsocketOptions - return s -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go deleted file mode 100644 index 3a7b98fc..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. 
- * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "fmt" - "io" -) - -// ConnackPacket is an internal representation of the fields of the -// Connack MQTT packet -type ConnackPacket struct { - FixedHeader - SessionPresent bool - ReturnCode byte -} - -func (ca *ConnackPacket) String() string { - return fmt.Sprintf("%s sessionpresent: %t returncode: %d", ca.FixedHeader, ca.SessionPresent, ca.ReturnCode) -} - -func (ca *ConnackPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.WriteByte(boolToByte(ca.SessionPresent)) - body.WriteByte(ca.ReturnCode) - ca.FixedHeader.RemainingLength = 2 - packet := ca.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (ca *ConnackPacket) Unpack(b io.Reader) error { - flags, err := decodeByte(b) - if err != nil { - return err - } - ca.SessionPresent = 1&flags > 0 - ca.ReturnCode, err = decodeByte(b) - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (ca *ConnackPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go deleted file mode 100644 index b4446a55..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "fmt" - "io" -) - -// ConnectPacket is an internal representation of the fields of the -// Connect MQTT packet -type ConnectPacket struct { - FixedHeader - ProtocolName string - ProtocolVersion byte - CleanSession bool - WillFlag bool - WillQos byte - WillRetain bool - UsernameFlag bool - PasswordFlag bool - ReservedBit byte - Keepalive uint16 - - ClientIdentifier string - WillTopic string - WillMessage []byte - Username string - Password []byte -} - -func (c *ConnectPacket) String() string { - var password string - if len(c.Password) > 0 { - password = "" - } - return fmt.Sprintf("%s protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.FixedHeader, c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, password) -} - -func (c *ConnectPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.Write(encodeString(c.ProtocolName)) - body.WriteByte(c.ProtocolVersion) - body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7) - body.Write(encodeUint16(c.Keepalive)) - body.Write(encodeString(c.ClientIdentifier)) - if c.WillFlag { - 
body.Write(encodeString(c.WillTopic)) - body.Write(encodeBytes(c.WillMessage)) - } - if c.UsernameFlag { - body.Write(encodeString(c.Username)) - } - if c.PasswordFlag { - body.Write(encodeBytes(c.Password)) - } - c.FixedHeader.RemainingLength = body.Len() - packet := c.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (c *ConnectPacket) Unpack(b io.Reader) error { - var err error - c.ProtocolName, err = decodeString(b) - if err != nil { - return err - } - c.ProtocolVersion, err = decodeByte(b) - if err != nil { - return err - } - options, err := decodeByte(b) - if err != nil { - return err - } - c.ReservedBit = 1 & options - c.CleanSession = 1&(options>>1) > 0 - c.WillFlag = 1&(options>>2) > 0 - c.WillQos = 3 & (options >> 3) - c.WillRetain = 1&(options>>5) > 0 - c.PasswordFlag = 1&(options>>6) > 0 - c.UsernameFlag = 1&(options>>7) > 0 - c.Keepalive, err = decodeUint16(b) - if err != nil { - return err - } - c.ClientIdentifier, err = decodeString(b) - if err != nil { - return err - } - if c.WillFlag { - c.WillTopic, err = decodeString(b) - if err != nil { - return err - } - c.WillMessage, err = decodeBytes(b) - if err != nil { - return err - } - } - if c.UsernameFlag { - c.Username, err = decodeString(b) - if err != nil { - return err - } - } - if c.PasswordFlag { - c.Password, err = decodeBytes(b) - if err != nil { - return err - } - } - - return nil -} - -// Validate performs validation of the fields of a Connect packet -func (c *ConnectPacket) Validate() byte { - if c.PasswordFlag && !c.UsernameFlag { - return ErrRefusedBadUsernameOrPassword - } - if c.ReservedBit != 0 { - // Bad reserved bit - return ErrProtocolViolation - } - if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) { - // Mismatched or unsupported protocol version - return 
ErrRefusedBadProtocolVersion - } - if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" { - // Bad protocol name - return ErrProtocolViolation - } - if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 { - // Bad size field - return ErrProtocolViolation - } - if len(c.ClientIdentifier) == 0 && !c.CleanSession { - // Bad client identifier - return ErrRefusedIDRejected - } - return Accepted -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (c *ConnectPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go deleted file mode 100644 index cf352a37..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "io" -) - -// DisconnectPacket is an internal representation of the fields of the -// Disconnect MQTT packet -type DisconnectPacket struct { - FixedHeader -} - -func (d *DisconnectPacket) String() string { - return d.FixedHeader.String() -} - -func (d *DisconnectPacket) Write(w io.Writer) error { - packet := d.FixedHeader.pack() - _, err := packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (d *DisconnectPacket) Unpack(b io.Reader) error { - return nil -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (d *DisconnectPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go deleted file mode 100644 index b2d7ed1b..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go +++ /dev/null @@ -1,372 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" -) - -// ControlPacket defines the interface for structs intended to hold -// decoded MQTT packets, either from being read or before being -// written -type ControlPacket interface { - Write(io.Writer) error - Unpack(io.Reader) error - String() string - Details() Details -} - -// PacketNames maps the constants for each of the MQTT packet types -// to a string representation of their name. -var PacketNames = map[uint8]string{ - 1: "CONNECT", - 2: "CONNACK", - 3: "PUBLISH", - 4: "PUBACK", - 5: "PUBREC", - 6: "PUBREL", - 7: "PUBCOMP", - 8: "SUBSCRIBE", - 9: "SUBACK", - 10: "UNSUBSCRIBE", - 11: "UNSUBACK", - 12: "PINGREQ", - 13: "PINGRESP", - 14: "DISCONNECT", -} - -// Below are the constants assigned to each of the MQTT packet types -const ( - Connect = 1 - Connack = 2 - Publish = 3 - Puback = 4 - Pubrec = 5 - Pubrel = 6 - Pubcomp = 7 - Subscribe = 8 - Suback = 9 - Unsubscribe = 10 - Unsuback = 11 - Pingreq = 12 - Pingresp = 13 - Disconnect = 14 -) - -// Below are the const definitions for error codes returned by -// Connect() -const ( - Accepted = 0x00 - ErrRefusedBadProtocolVersion = 0x01 - ErrRefusedIDRejected = 0x02 - ErrRefusedServerUnavailable = 0x03 - ErrRefusedBadUsernameOrPassword = 0x04 - ErrRefusedNotAuthorised = 0x05 - ErrNetworkError = 0xFE - ErrProtocolViolation = 0xFF -) - -// ConnackReturnCodes is a map of the error codes constants for Connect() -// to a string representation of the error -var ConnackReturnCodes = map[uint8]string{ - 0: "Connection Accepted", - 1: "Connection Refused: Bad Protocol Version", - 2: "Connection Refused: Client Identifier Rejected", - 3: "Connection Refused: Server Unavailable", - 4: "Connection Refused: Username or Password in unknown format", - 5: "Connection Refused: Not Authorised", - 254: "Connection Error", - 255: "Connection Refused: Protocol Violation", -} - -var ( - 
ErrorRefusedBadProtocolVersion = errors.New("unacceptable protocol version") - ErrorRefusedIDRejected = errors.New("identifier rejected") - ErrorRefusedServerUnavailable = errors.New("server Unavailable") - ErrorRefusedBadUsernameOrPassword = errors.New("bad user name or password") - ErrorRefusedNotAuthorised = errors.New("not Authorized") - ErrorNetworkError = errors.New("network Error") - ErrorProtocolViolation = errors.New("protocol Violation") -) - -// ConnErrors is a map of the errors codes constants for Connect() -// to a Go error -var ConnErrors = map[byte]error{ - Accepted: nil, - ErrRefusedBadProtocolVersion: ErrorRefusedBadProtocolVersion, - ErrRefusedIDRejected: ErrorRefusedIDRejected, - ErrRefusedServerUnavailable: ErrorRefusedServerUnavailable, - ErrRefusedBadUsernameOrPassword: ErrorRefusedBadUsernameOrPassword, - ErrRefusedNotAuthorised: ErrorRefusedNotAuthorised, - ErrNetworkError: ErrorNetworkError, - ErrProtocolViolation: ErrorProtocolViolation, -} - -// ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts -// to read an MQTT packet from the stream. It returns a ControlPacket -// representing the decoded MQTT packet and an error. One of these returns will -// always be nil, a nil ControlPacket indicating an error occurred. 
-func ReadPacket(r io.Reader) (ControlPacket, error) { - var fh FixedHeader - b := make([]byte, 1) - - _, err := io.ReadFull(r, b) - if err != nil { - return nil, err - } - - err = fh.unpack(b[0], r) - if err != nil { - return nil, err - } - - cp, err := NewControlPacketWithHeader(fh) - if err != nil { - return nil, err - } - - packetBytes := make([]byte, fh.RemainingLength) - n, err := io.ReadFull(r, packetBytes) - if err != nil { - return nil, err - } - if n != fh.RemainingLength { - return nil, errors.New("failed to read expected data") - } - - err = cp.Unpack(bytes.NewBuffer(packetBytes)) - return cp, err -} - -// NewControlPacket is used to create a new ControlPacket of the type specified -// by packetType, this is usually done by reference to the packet type constants -// defined in packets.go. The newly created ControlPacket is empty and a pointer -// is returned. -func NewControlPacket(packetType byte) ControlPacket { - switch packetType { - case Connect: - return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}} - case Connack: - return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}} - case Disconnect: - return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}} - case Publish: - return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}} - case Puback: - return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}} - case Pubrec: - return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}} - case Pubrel: - return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}} - case Pubcomp: - return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}} - case Subscribe: - return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}} - case Suback: - return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}} - case Unsubscribe: - return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}} - case Unsuback: - return 
&UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}} - case Pingreq: - return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}} - case Pingresp: - return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}} - } - return nil -} - -// NewControlPacketWithHeader is used to create a new ControlPacket of the type -// specified within the FixedHeader that is passed to the function. -// The newly created ControlPacket is empty and a pointer is returned. -func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) { - switch fh.MessageType { - case Connect: - return &ConnectPacket{FixedHeader: fh}, nil - case Connack: - return &ConnackPacket{FixedHeader: fh}, nil - case Disconnect: - return &DisconnectPacket{FixedHeader: fh}, nil - case Publish: - return &PublishPacket{FixedHeader: fh}, nil - case Puback: - return &PubackPacket{FixedHeader: fh}, nil - case Pubrec: - return &PubrecPacket{FixedHeader: fh}, nil - case Pubrel: - return &PubrelPacket{FixedHeader: fh}, nil - case Pubcomp: - return &PubcompPacket{FixedHeader: fh}, nil - case Subscribe: - return &SubscribePacket{FixedHeader: fh}, nil - case Suback: - return &SubackPacket{FixedHeader: fh}, nil - case Unsubscribe: - return &UnsubscribePacket{FixedHeader: fh}, nil - case Unsuback: - return &UnsubackPacket{FixedHeader: fh}, nil - case Pingreq: - return &PingreqPacket{FixedHeader: fh}, nil - case Pingresp: - return &PingrespPacket{FixedHeader: fh}, nil - } - return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType) -} - -// Details struct returned by the Details() function called on -// ControlPackets to present details of the Qos and MessageID -// of the ControlPacket -type Details struct { - Qos byte - MessageID uint16 -} - -// FixedHeader is a struct to hold the decoded information from -// the fixed header of an MQTT ControlPacket -type FixedHeader struct { - MessageType byte - Dup bool - Qos byte - Retain bool - RemainingLength int -} - -func (fh 
FixedHeader) String() string { - return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength) -} - -func boolToByte(b bool) byte { - switch b { - case true: - return 1 - default: - return 0 - } -} - -func (fh *FixedHeader) pack() bytes.Buffer { - var header bytes.Buffer - header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain)) - header.Write(encodeLength(fh.RemainingLength)) - return header -} - -func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error { - fh.MessageType = typeAndFlags >> 4 - fh.Dup = (typeAndFlags>>3)&0x01 > 0 - fh.Qos = (typeAndFlags >> 1) & 0x03 - fh.Retain = typeAndFlags&0x01 > 0 - - var err error - fh.RemainingLength, err = decodeLength(r) - return err -} - -func decodeByte(b io.Reader) (byte, error) { - num := make([]byte, 1) - _, err := b.Read(num) - if err != nil { - return 0, err - } - - return num[0], nil -} - -func decodeUint16(b io.Reader) (uint16, error) { - num := make([]byte, 2) - _, err := b.Read(num) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(num), nil -} - -func encodeUint16(num uint16) []byte { - bytesResult := make([]byte, 2) - binary.BigEndian.PutUint16(bytesResult, num) - return bytesResult -} - -func encodeString(field string) []byte { - return encodeBytes([]byte(field)) -} - -func decodeString(b io.Reader) (string, error) { - buf, err := decodeBytes(b) - return string(buf), err -} - -func decodeBytes(b io.Reader) ([]byte, error) { - fieldLength, err := decodeUint16(b) - if err != nil { - return nil, err - } - - field := make([]byte, fieldLength) - _, err = b.Read(field) - if err != nil { - return nil, err - } - - return field, nil -} - -func encodeBytes(field []byte) []byte { - fieldLength := make([]byte, 2) - binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) - return append(fieldLength, field...) 
-} - -func encodeLength(length int) []byte { - var encLength []byte - for { - digit := byte(length % 128) - length /= 128 - if length > 0 { - digit |= 0x80 - } - encLength = append(encLength, digit) - if length == 0 { - break - } - } - return encLength -} - -func decodeLength(r io.Reader) (int, error) { - var rLength uint32 - var multiplier uint32 - b := make([]byte, 1) - for multiplier < 27 { // fix: Infinite '(digit & 128) == 1' will cause the dead loop - _, err := io.ReadFull(r, b) - if err != nil { - return 0, err - } - - digit := b[0] - rLength |= uint32(digit&127) << multiplier - if (digit & 128) == 0 { - break - } - multiplier += 7 - } - return int(rLength), nil -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go deleted file mode 100644 index cd52948e..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "io" -) - -// PingreqPacket is an internal representation of the fields of the -// Pingreq MQTT packet -type PingreqPacket struct { - FixedHeader -} - -func (pr *PingreqPacket) String() string { - return pr.FixedHeader.String() -} - -func (pr *PingreqPacket) Write(w io.Writer) error { - packet := pr.FixedHeader.pack() - _, err := packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (pr *PingreqPacket) Unpack(b io.Reader) error { - return nil -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (pr *PingreqPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go deleted file mode 100644 index d7becdf2..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "io" -) - -// PingrespPacket is an internal representation of the fields of the -// Pingresp MQTT packet -type PingrespPacket struct { - FixedHeader -} - -func (pr *PingrespPacket) String() string { - return pr.FixedHeader.String() -} - -func (pr *PingrespPacket) Write(w io.Writer) error { - packet := pr.FixedHeader.pack() - _, err := packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (pr *PingrespPacket) Unpack(b io.Reader) error { - return nil -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (pr *PingrespPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go deleted file mode 100644 index f6e727ec..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "fmt" - "io" -) - -// PubackPacket is an internal representation of the fields of the -// Puback MQTT packet -type PubackPacket struct { - FixedHeader - MessageID uint16 -} - -func (pa *PubackPacket) String() string { - return fmt.Sprintf("%s MessageID: %d", pa.FixedHeader, pa.MessageID) -} - -func (pa *PubackPacket) Write(w io.Writer) error { - var err error - pa.FixedHeader.RemainingLength = 2 - packet := pa.FixedHeader.pack() - packet.Write(encodeUint16(pa.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (pa *PubackPacket) Unpack(b io.Reader) error { - var err error - pa.MessageID, err = decodeUint16(b) - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (pa *PubackPacket) Details() Details { - return Details{Qos: pa.Qos, MessageID: pa.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go deleted file mode 100644 index 84a1af5d..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "fmt" - "io" -) - -// PubcompPacket is an internal representation of the fields of the -// Pubcomp MQTT packet -type PubcompPacket struct { - FixedHeader - MessageID uint16 -} - -func (pc *PubcompPacket) String() string { - return fmt.Sprintf("%s MessageID: %d", pc.FixedHeader, pc.MessageID) -} - -func (pc *PubcompPacket) Write(w io.Writer) error { - var err error - pc.FixedHeader.RemainingLength = 2 - packet := pc.FixedHeader.pack() - packet.Write(encodeUint16(pc.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (pc *PubcompPacket) Unpack(b io.Reader) error { - var err error - pc.MessageID, err = decodeUint16(b) - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (pc *PubcompPacket) Details() Details { - return Details{Qos: pc.Qos, MessageID: pc.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go deleted file mode 100644 index 9fba5df8..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "fmt" - "io" -) - -// PublishPacket is an internal representation of the fields of the -// Publish MQTT packet -type PublishPacket struct { - FixedHeader - TopicName string - MessageID uint16 - Payload []byte -} - -func (p *PublishPacket) String() string { - return fmt.Sprintf("%s topicName: %s MessageID: %d payload: %s", p.FixedHeader, p.TopicName, p.MessageID, string(p.Payload)) -} - -func (p *PublishPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.Write(encodeString(p.TopicName)) - if p.Qos > 0 { - body.Write(encodeUint16(p.MessageID)) - } - p.FixedHeader.RemainingLength = body.Len() + len(p.Payload) - packet := p.FixedHeader.pack() - packet.Write(body.Bytes()) - packet.Write(p.Payload) - _, err = w.Write(packet.Bytes()) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (p *PublishPacket) Unpack(b io.Reader) error { - var payloadLength = p.FixedHeader.RemainingLength - var err error - p.TopicName, err = decodeString(b) - if err != nil { - return err - } - - if p.Qos > 0 { - p.MessageID, err = decodeUint16(b) - if err != nil { - return err - } - payloadLength -= len(p.TopicName) + 4 - } else { - payloadLength -= len(p.TopicName) + 2 - } - if payloadLength < 0 { - return fmt.Errorf("error unpacking publish, payload length < 0") - } - p.Payload = make([]byte, payloadLength) - _, err = b.Read(p.Payload) - - return err -} - -// Copy creates a new PublishPacket with the same topic and payload -// but an empty fixed header, useful for when you want to deliver -// a message with different properties such as Qos but the same -// content -func (p *PublishPacket) Copy() *PublishPacket { - newP := NewControlPacket(Publish).(*PublishPacket) - newP.TopicName = p.TopicName - newP.Payload = p.Payload - - return newP -} - -// Details returns a Details struct containing the Qos and 
-// MessageID of this ControlPacket -func (p *PublishPacket) Details() Details { - return Details{Qos: p.Qos, MessageID: p.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go deleted file mode 100644 index da9ed2a4..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "fmt" - "io" -) - -// PubrecPacket is an internal representation of the fields of the -// Pubrec MQTT packet -type PubrecPacket struct { - FixedHeader - MessageID uint16 -} - -func (pr *PubrecPacket) String() string { - return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID) -} - -func (pr *PubrecPacket) Write(w io.Writer) error { - var err error - pr.FixedHeader.RemainingLength = 2 - packet := pr.FixedHeader.pack() - packet.Write(encodeUint16(pr.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (pr *PubrecPacket) Unpack(b io.Reader) error { - var err error - pr.MessageID, err = decodeUint16(b) - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (pr *PubrecPacket) Details() Details { - return Details{Qos: pr.Qos, MessageID: pr.MessageID} -} diff --git 
a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go deleted file mode 100644 index f418ff86..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "fmt" - "io" -) - -// PubrelPacket is an internal representation of the fields of the -// Pubrel MQTT packet -type PubrelPacket struct { - FixedHeader - MessageID uint16 -} - -func (pr *PubrelPacket) String() string { - return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID) -} - -func (pr *PubrelPacket) Write(w io.Writer) error { - var err error - pr.FixedHeader.RemainingLength = 2 - packet := pr.FixedHeader.pack() - packet.Write(encodeUint16(pr.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (pr *PubrelPacket) Unpack(b io.Reader) error { - var err error - pr.MessageID, err = decodeUint16(b) - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (pr *PubrelPacket) Details() Details { - return Details{Qos: pr.Qos, MessageID: pr.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go deleted file mode 100644 index 261cf21c..00000000 --- 
a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "fmt" - "io" -) - -// SubackPacket is an internal representation of the fields of the -// Suback MQTT packet -type SubackPacket struct { - FixedHeader - MessageID uint16 - ReturnCodes []byte -} - -func (sa *SubackPacket) String() string { - return fmt.Sprintf("%s MessageID: %d", sa.FixedHeader, sa.MessageID) -} - -func (sa *SubackPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - body.Write(encodeUint16(sa.MessageID)) - body.Write(sa.ReturnCodes) - sa.FixedHeader.RemainingLength = body.Len() - packet := sa.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (sa *SubackPacket) Unpack(b io.Reader) error { - var qosBuffer bytes.Buffer - var err error - sa.MessageID, err = decodeUint16(b) - if err != nil { - return err - } - - _, err = qosBuffer.ReadFrom(b) - if err != nil { - return err - } - sa.ReturnCodes = qosBuffer.Bytes() - - return nil -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (sa *SubackPacket) Details() Details { - return Details{Qos: 0, MessageID: sa.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go 
b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go deleted file mode 100644 index 313bf5a2..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "fmt" - "io" -) - -// SubscribePacket is an internal representation of the fields of the -// Subscribe MQTT packet -type SubscribePacket struct { - FixedHeader - MessageID uint16 - Topics []string - Qoss []byte -} - -func (s *SubscribePacket) String() string { - return fmt.Sprintf("%s MessageID: %d topics: %s", s.FixedHeader, s.MessageID, s.Topics) -} - -func (s *SubscribePacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.Write(encodeUint16(s.MessageID)) - for i, topic := range s.Topics { - body.Write(encodeString(topic)) - body.WriteByte(s.Qoss[i]) - } - s.FixedHeader.RemainingLength = body.Len() - packet := s.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (s *SubscribePacket) Unpack(b io.Reader) error { - var err error - s.MessageID, err = decodeUint16(b) - if err != nil { - return err - } - payloadLength := s.FixedHeader.RemainingLength - 2 - for payloadLength > 0 { - topic, err := decodeString(b) - if err != nil { - return err - } - s.Topics = append(s.Topics, topic) - qos, err := decodeByte(b) - if err != 
nil { - return err - } - s.Qoss = append(s.Qoss, qos) - payloadLength -= 2 + len(topic) + 1 // 2 bytes of string length, plus string, plus 1 byte for Qos - } - - return nil -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (s *SubscribePacket) Details() Details { - return Details{Qos: 1, MessageID: s.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go deleted file mode 100644 index acdd400a..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "fmt" - "io" -) - -// UnsubackPacket is an internal representation of the fields of the -// Unsuback MQTT packet -type UnsubackPacket struct { - FixedHeader - MessageID uint16 -} - -func (ua *UnsubackPacket) String() string { - return fmt.Sprintf("%s MessageID: %d", ua.FixedHeader, ua.MessageID) -} - -func (ua *UnsubackPacket) Write(w io.Writer) error { - var err error - ua.FixedHeader.RemainingLength = 2 - packet := ua.FixedHeader.pack() - packet.Write(encodeUint16(ua.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (ua *UnsubackPacket) Unpack(b io.Reader) error { - var err error - ua.MessageID, err = decodeUint16(b) - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (ua *UnsubackPacket) Details() Details { - return Details{Qos: 0, MessageID: ua.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go deleted file mode 100644 index 54d06aa2..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - * Allan Stockdill-Mander - */ - -package packets - -import ( - "bytes" - "fmt" - "io" -) - -// UnsubscribePacket is an internal representation of the fields of the -// Unsubscribe MQTT packet -type UnsubscribePacket struct { - FixedHeader - MessageID uint16 - Topics []string -} - -func (u *UnsubscribePacket) String() string { - return fmt.Sprintf("%s MessageID: %d", u.FixedHeader, u.MessageID) -} - -func (u *UnsubscribePacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - body.Write(encodeUint16(u.MessageID)) - for _, topic := range u.Topics { - body.Write(encodeString(topic)) - } - u.FixedHeader.RemainingLength = body.Len() - packet := u.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -// Unpack decodes the details of a ControlPacket after the fixed -// header has been read -func (u *UnsubscribePacket) Unpack(b io.Reader) error { - var err error - u.MessageID, err = decodeUint16(b) - if err != nil { - return err - } - - for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) { - u.Topics = append(u.Topics, topic) - } - - return err -} - -// Details returns a Details struct containing the Qos and -// MessageID of this ControlPacket -func (u *UnsubscribePacket) Details() Details { - return Details{Qos: 1, MessageID: u.MessageID} -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go deleted file mode 100644 index 91cd3dec..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. 
- * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "errors" - "io" - "sync/atomic" - "time" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// keepalive - Send ping when connection unused for set period -// connection passed in to avoid race condition on shutdown -func keepalive(c *client, conn io.Writer) { - defer c.workers.Done() - DEBUG.Println(PNG, "keepalive starting") - var checkInterval time.Duration - var pingSent time.Time - - if c.options.KeepAlive > 10 { - checkInterval = 5 * time.Second - } else { - checkInterval = time.Duration(c.options.KeepAlive) * time.Second / 2 - } - - intervalTicker := time.NewTicker(checkInterval) - defer intervalTicker.Stop() - - for { - select { - case <-c.stop: - DEBUG.Println(PNG, "keepalive stopped") - return - case <-intervalTicker.C: - lastSent := c.lastSent.Load().(time.Time) - lastReceived := c.lastReceived.Load().(time.Time) - - DEBUG.Println(PNG, "ping check", time.Since(lastSent).Seconds()) - if time.Since(lastSent) >= time.Duration(c.options.KeepAlive*int64(time.Second)) || time.Since(lastReceived) >= time.Duration(c.options.KeepAlive*int64(time.Second)) { - if atomic.LoadInt32(&c.pingOutstanding) == 0 { - DEBUG.Println(PNG, "keepalive sending ping") - ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) - // We don't want to wait behind large messages being sent, the `Write` call - // will block until it is able to send the packet. 
- atomic.StoreInt32(&c.pingOutstanding, 1) - if err := ping.Write(conn); err != nil { - ERROR.Println(PNG, err) - } - c.lastSent.Store(time.Now()) - pingSent = time.Now() - } - } - if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Since(pingSent) >= c.options.PingTimeout { - CRITICAL.Println(PNG, "pingresp not received, disconnecting") - c.internalConnLost(errors.New("pingresp not received, disconnecting")) // no harm in calling this if the connection is already down (or shutdown is in progress) - return - } - } - } -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/router.go b/vendor/github.com/eclipse/paho.mqtt.golang/router.go deleted file mode 100644 index bd05a0c0..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/router.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "container/list" - "strings" - "sync" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// route is a type which associates MQTT Topic strings with a -// callback to be executed upon the arrival of a message associated -// with a subscription to that topic. -type route struct { - topic string - callback MessageHandler -} - -// match takes a slice of strings which represent the route being tested having been split on '/' -// separators, and a slice of strings representing the topic string in the published message, similarly -// split. 
-// The function determines if the topic string matches the route according to the MQTT topic rules -// and returns a boolean of the outcome -func match(route []string, topic []string) bool { - if len(route) == 0 { - return len(topic) == 0 - } - - if len(topic) == 0 { - return route[0] == "#" - } - - if route[0] == "#" { - return true - } - - if (route[0] == "+") || (route[0] == topic[0]) { - return match(route[1:], topic[1:]) - } - return false -} - -func routeIncludesTopic(route, topic string) bool { - return match(routeSplit(route), strings.Split(topic, "/")) -} - -// removes $share and sharename when splitting the route to allow -// shared subscription routes to correctly match the topic -func routeSplit(route string) []string { - var result []string - if strings.HasPrefix(route, "$share") { - result = strings.Split(route, "/")[2:] - } else { - result = strings.Split(route, "/") - } - return result -} - -// match takes the topic string of the published message and does a basic compare to the -// string of the current Route, if they match it returns true -func (r *route) match(topic string) bool { - return r.topic == topic || routeIncludesTopic(r.topic, topic) -} - -type router struct { - sync.RWMutex - routes *list.List - defaultHandler MessageHandler - messages chan *packets.PublishPacket -} - -// newRouter returns a new instance of a Router and channel which can be used to tell the Router -// to stop -func newRouter() *router { - router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket)} - return router -} - -// addRoute takes a topic string and MessageHandler callback. It looks in the current list of -// routes to see if there is already a matching Route. If there is it replaces the current -// callback with the new one. If not it add a new entry to the list of Routes. 
-func (r *router) addRoute(topic string, callback MessageHandler) { - r.Lock() - defer r.Unlock() - for e := r.routes.Front(); e != nil; e = e.Next() { - if e.Value.(*route).topic == topic { - r := e.Value.(*route) - r.callback = callback - return - } - } - r.routes.PushBack(&route{topic: topic, callback: callback}) -} - -// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If -// found it removes the Route from the list. -func (r *router) deleteRoute(topic string) { - r.Lock() - defer r.Unlock() - for e := r.routes.Front(); e != nil; e = e.Next() { - if e.Value.(*route).topic == topic { - r.routes.Remove(e) - return - } - } -} - -// setDefaultHandler assigns a default callback that will be called if no matching Route -// is found for an incoming Publish. -func (r *router) setDefaultHandler(handler MessageHandler) { - r.Lock() - defer r.Unlock() - r.defaultHandler = handler -} - -// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that -// takes messages off the channel, matches them against the internal route list and calls the -// associated callback (or the defaultHandler, if one exists and no other route matched). If -// anything is sent down the stop channel the function will end. 
-func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) <-chan *PacketAndToken { - var wg sync.WaitGroup - ackOutChan := make(chan *PacketAndToken) // Channel returned to caller; closed when messages channel closed - var ackInChan chan *PacketAndToken // ACKs generated by ackFunc get put onto this channel - - stopAckCopy := make(chan struct{}) // Closure requests stop of go routine copying ackInChan to ackOutChan - ackCopyStopped := make(chan struct{}) // Closure indicates that it is safe to close ackOutChan - goRoutinesDone := make(chan struct{}) // closed on wg.Done() - if order { - ackInChan = ackOutChan // When order = true no go routines are used so safe to use one channel and close when done - } else { - // When order = false ACK messages are sent in go routines so ackInChan cannot be closed until all goroutines done - ackInChan = make(chan *PacketAndToken) - go func() { // go routine to copy from ackInChan to ackOutChan until stopped - for { - select { - case a := <-ackInChan: - ackOutChan <- a - case <-stopAckCopy: - close(ackCopyStopped) // Signal main go routine that it is safe to close ackOutChan - for { - select { - case <-ackInChan: // drain ackInChan to ensure all goRoutines can complete cleanly (ACK dropped) - DEBUG.Println(ROU, "matchAndDispatch received acknowledgment after processing stopped (ACK dropped).") - case <-goRoutinesDone: - close(ackInChan) // Nothing further should be sent (a panic is probably better than silent failure) - DEBUG.Println(ROU, "matchAndDispatch order=false copy goroutine exiting.") - return - } - } - } - } - }() - } - - go func() { // Main go routine handling inbound messages - for message := range messages { - // DEBUG.Println(ROU, "matchAndDispatch received message") - sent := false - r.RLock() - m := messageFromPublish(message, ackFunc(ackInChan, client.persist, message)) - var handlers []MessageHandler - for e := r.routes.Front(); e != nil; e = e.Next() { - if 
e.Value.(*route).match(message.TopicName) { - if order { - handlers = append(handlers, e.Value.(*route).callback) - } else { - hd := e.Value.(*route).callback - wg.Add(1) - go func() { - hd(client, m) - if !client.options.AutoAckDisabled { - m.Ack() - } - wg.Done() - }() - } - sent = true - } - } - if !sent { - if r.defaultHandler != nil { - if order { - handlers = append(handlers, r.defaultHandler) - } else { - wg.Add(1) - go func() { - r.defaultHandler(client, m) - if !client.options.AutoAckDisabled { - m.Ack() - } - wg.Done() - }() - } - } else { - DEBUG.Println(ROU, "matchAndDispatch received message and no handler was available. Message will NOT be acknowledged.") - } - } - r.RUnlock() - for _, handler := range handlers { - handler(client, m) - if !client.options.AutoAckDisabled { - m.Ack() - } - } - // DEBUG.Println(ROU, "matchAndDispatch handled message") - } - if order { - close(ackOutChan) - } else { // Ensure that nothing further will be written to ackOutChan before closing it - close(stopAckCopy) - <-ackCopyStopped - close(ackOutChan) - go func() { - wg.Wait() // Note: If this remains running then the user has handlers that are not returning - close(goRoutinesDone) - }() - } - DEBUG.Println(ROU, "matchAndDispatch exiting") - }() - return ackOutChan -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/status.go b/vendor/github.com/eclipse/paho.mqtt.golang/status.go deleted file mode 100644 index d25fbf50..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/status.go +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. 
- * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - * Matt Brittan - */ - -package mqtt - -import ( - "errors" - "sync" -) - -// Status - Manage the connection status - -// Multiple go routines will want to access/set this. Previously status was implemented as a `uint32` and updated -// with a mixture of atomic functions and a mutex (leading to some deadlock type issues that were very hard to debug). - -// In this new implementation `connectionStatus` takes over managing the state and provides functions that allow the -// client to request a move to a particular state (it may reject these requests!). In some cases the 'state' is -// transitory, for example `connecting`, in those cases a function will be returned that allows the client to move -// to a more static state (`disconnected` or `connected`). - -// This "belts-and-braces" may be a little over the top but issues with the status have caused a number of difficult -// to trace bugs in the past and the likelihood that introducing a new system would introduce bugs seemed high! -// I have written this in a way that should make it very difficult to misuse it (but it does make things a little -// complex with functions returning functions that return functions!). 
- -type status uint32 - -const ( - disconnected status = iota // default (nil) status is disconnected - disconnecting // Transitioning from one of the below states back to disconnected - connecting - reconnecting - connected -) - -// String simplify output of statuses -func (s status) String() string { - switch s { - case disconnected: - return "disconnected" - case disconnecting: - return "disconnecting" - case connecting: - return "connecting" - case reconnecting: - return "reconnecting" - case connected: - return "connected" - default: - return "invalid" - } -} - -type connCompletedFn func(success bool) error -type disconnectCompletedFn func() -type connectionLostHandledFn func(bool) (connCompletedFn, error) - -/* State transitions - -static states are `disconnected` and `connected`. For all other states a process will hold a function that will move -the state to one of those. That function effectively owns the state and any other changes must not proceed until it -completes. One exception to that is that the state can always be moved to `disconnecting` which provides a signal that -transitions to `connected` will be rejected (this is required because a Disconnect can be requested while in the -Connecting state). - -# Basic Operations - -The standard workflows are: - -disconnected -> `Connecting()` -> connecting -> `connCompletedFn(true)` -> connected -connected -> `Disconnecting()` -> disconnecting -> `disconnectCompletedFn()` -> disconnected -connected -> `ConnectionLost(false)` -> disconnecting -> `connectionLostHandledFn(true/false)` -> disconnected -connected -> `ConnectionLost(true)` -> disconnecting -> `connectionLostHandledFn(true)` -> connected - -Unfortunately the above workflows are complicated by the fact that `Disconnecting()` or `ConnectionLost()` may, -potentially, be called at any time (i.e. whilst in the middle of transitioning between states). 
If this happens: - -* The state will be set to disconnecting (which will prevent any request to move the status to connected) -* The call to `Disconnecting()`/`ConnectionLost()` will block until the previously active call completes and then - handle the disconnection. - -Reading the tests (unit_status_test.go) might help understand these rules. -*/ - -var ( - errAbortConnection = errors.New("disconnect called whist connection attempt in progress") - errAlreadyConnectedOrReconnecting = errors.New("status is already connected or reconnecting") - errStatusMustBeDisconnected = errors.New("status can only transition to connecting from disconnected") - errAlreadyDisconnected = errors.New("status is already disconnected") - errDisconnectionRequested = errors.New("disconnection was requested whilst the action was in progress") - errDisconnectionInProgress = errors.New("disconnection already in progress") - errAlreadyHandlingConnectionLoss = errors.New("status is already Connection Lost") - errConnLossWhileDisconnecting = errors.New("connection status is disconnecting so loss of connection is expected") -) - -// connectionStatus encapsulates, and protects, the connection status. -type connectionStatus struct { - sync.RWMutex // Protects the variables below - status status - willReconnect bool // only used when status == disconnecting. Indicates that an attempt will be made to reconnect (allows us to abort that) - - // Some statuses are transitional (e.g. connecting, connectionLost, reconnecting, disconnecting), that is, whatever - // process moves us into that status will move us out of it when an action is complete. Sometimes other users - // will need to know when the action is complete (e.g. the user calls `Disconnect()` whilst the status is - // `connecting`). `actionCompleted` will be set whenever we move into one of the above statues and the channel - // returned to anything else requesting a status change. The channel will be closed when the operation is complete. 
- actionCompleted chan struct{} // Only valid whilst status is Connecting or Reconnecting; will be closed when connection completed (success or failure) -} - -// ConnectionStatus returns the connection status. -// WARNING: the status may change at any time so users should not assume they are the only goroutine touching this -func (c *connectionStatus) ConnectionStatus() status { - c.RLock() - defer c.RUnlock() - return c.status -} - -// ConnectionStatusRetry returns the connection status and retry flag (indicates that we expect to reconnect). -// WARNING: the status may change at any time so users should not assume they are the only goroutine touching this -func (c *connectionStatus) ConnectionStatusRetry() (status, bool) { - c.RLock() - defer c.RUnlock() - return c.status, c.willReconnect -} - -// Connecting - Changes the status to connecting if that is a permitted operation -// Will do nothing unless the current status is disconnected -// Returns a function that MUST be called when the operation is complete (pass in true if successful) -func (c *connectionStatus) Connecting() (connCompletedFn, error) { - c.Lock() - defer c.Unlock() - // Calling Connect when already connecting (or if reconnecting) may not always be considered an error - if c.status == connected || c.status == reconnecting { - return nil, errAlreadyConnectedOrReconnecting - } - if c.status != disconnected { - return nil, errStatusMustBeDisconnected - } - c.status = connecting - c.actionCompleted = make(chan struct{}) - return c.connected, nil -} - -// connected is an internal function (it is returned by functions that set the status to connecting or reconnecting, -// calling it completes the operation). `success` is used to indicate whether the operation was successfully completed. 
-func (c *connectionStatus) connected(success bool) error { - c.Lock() - defer func() { - close(c.actionCompleted) // Alert anything waiting on the connection process to complete - c.actionCompleted = nil // Be tidy - c.Unlock() - }() - - // Status may have moved to disconnecting in the interim (i.e. at users request) - if c.status == disconnecting { - return errAbortConnection - } - if success { - c.status = connected - } else { - c.status = disconnected - } - return nil -} - -// Disconnecting - should be called when beginning the disconnection process (cleanup etc.). -// Can be called from ANY status and the end result will always be a status of disconnected -// Note that if a connection/reconnection attempt is in progress this function will set the status to `disconnecting` -// then block until the connection process completes (or aborts). -// Returns a function that MUST be called when the operation is complete (assumed to always be successful!) -func (c *connectionStatus) Disconnecting() (disconnectCompletedFn, error) { - c.Lock() - if c.status == disconnected { - c.Unlock() - return nil, errAlreadyDisconnected // May not always be treated as an error - } - if c.status == disconnecting { // Need to wait for existing process to complete - c.willReconnect = false // Ensure that the existing disconnect process will not reconnect - disConnectDone := c.actionCompleted - c.Unlock() - <-disConnectDone // Wait for existing operation to complete - return nil, errAlreadyDisconnected // Well we are now! 
- } - - prevStatus := c.status - c.status = disconnecting - - // We may need to wait for connection/reconnection process to complete (they should regularly check the status) - if prevStatus == connecting || prevStatus == reconnecting { - connectDone := c.actionCompleted - c.Unlock() // Safe because the only way to leave the disconnecting status is via this function - <-connectDone - - if prevStatus == reconnecting && !c.willReconnect { - return nil, errAlreadyDisconnected // Following connectionLost process we will be disconnected - } - c.Lock() - } - c.actionCompleted = make(chan struct{}) - c.Unlock() - return c.disconnectionCompleted, nil -} - -// disconnectionCompleted is an internal function (it is returned by functions that set the status to disconnecting) -func (c *connectionStatus) disconnectionCompleted() { - c.Lock() - defer c.Unlock() - c.status = disconnected - close(c.actionCompleted) // Alert anything waiting on the connection process to complete - c.actionCompleted = nil -} - -// ConnectionLost - should be called when the connection is lost. -// This really only differs from Disconnecting in that we may transition into a reconnection (but that could be -// cancelled something else calls Disconnecting in the meantime). -// The returned function should be called when cleanup is completed. It will return a function to be called when -// reconnect completes (or nil if no reconnect requested/disconnect called in the interim). 
-// Note: This function may block if a connection is in progress (the move to connected will be rejected) -func (c *connectionStatus) ConnectionLost(willReconnect bool) (connectionLostHandledFn, error) { - c.Lock() - defer c.Unlock() - if c.status == disconnected { - return nil, errAlreadyDisconnected - } - if c.status == disconnecting { // its expected that connection lost will be called during the disconnection process - return nil, errDisconnectionInProgress - } - - c.willReconnect = willReconnect - prevStatus := c.status - c.status = disconnecting - - // There is a slight possibility that a connection attempt is in progress (connection up and goroutines started but - // status not yet changed). By changing the status we ensure that process will exit cleanly - if prevStatus == connecting || prevStatus == reconnecting { - connectDone := c.actionCompleted - c.Unlock() // Safe because the only way to leave the disconnecting status is via this function - <-connectDone - c.Lock() - if !willReconnect { - // In this case the connection will always be aborted so there is nothing more for us to do - return nil, errAlreadyDisconnected - } - } - c.actionCompleted = make(chan struct{}) - - return c.getConnectionLostHandler(willReconnect), nil -} - -// getConnectionLostHandler is an internal function. It returns the function to be returned by ConnectionLost -func (c *connectionStatus) getConnectionLostHandler(reconnectRequested bool) connectionLostHandledFn { - return func(proceed bool) (connCompletedFn, error) { - // Note that connCompletedFn will only be provided if both reconnectRequested and proceed are true - c.Lock() - defer c.Unlock() - - // `Disconnecting()` may have been called while the disconnection was being processed (this makes it permanent!) 
- if !c.willReconnect || !proceed { - c.status = disconnected - close(c.actionCompleted) // Alert anything waiting on the connection process to complete - c.actionCompleted = nil - if !reconnectRequested || !proceed { - return nil, nil - } - return nil, errDisconnectionRequested - } - - c.status = reconnecting - return c.connected, nil // Note that c.actionCompleted is still live and will be closed in connected - } -} - -// forceConnectionStatus - forces the connection status to the specified value. -// This should only be used when there is no alternative (i.e. only in tests and to recover from situations that -// are unexpected) -func (c *connectionStatus) forceConnectionStatus(s status) { - c.Lock() - defer c.Unlock() - c.status = s -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/store.go b/vendor/github.com/eclipse/paho.mqtt.golang/store.go deleted file mode 100644 index f50873cd..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/store.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "fmt" - "strconv" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -const ( - inboundPrefix = "i." - outboundPrefix = "o." -) - -// Store is an interface which can be used to provide implementations -// for message persistence. -// Because we may have to store distinct messages with the same -// message ID, we need a unique key for each message. This is -// possible by prepending "i." 
or "o." to each message id -type Store interface { - Open() - Put(key string, message packets.ControlPacket) - Get(key string) packets.ControlPacket - All() []string - Del(key string) - Close() - Reset() -} - -// A key MUST have the form "X.[messageid]" -// where X is 'i' or 'o' -func mIDFromKey(key string) uint16 { - s := key[2:] - i, err := strconv.ParseUint(s, 10, 16) - chkerr(err) - return uint16(i) -} - -// Return true if key prefix is outbound -func isKeyOutbound(key string) bool { - return key[:2] == outboundPrefix -} - -// Return true if key prefix is inbound -func isKeyInbound(key string) bool { - return key[:2] == inboundPrefix -} - -// Return a string of the form "i.[id]" -func inboundKeyFromMID(id uint16) string { - return fmt.Sprintf("%s%d", inboundPrefix, id) -} - -// Return a string of the form "o.[id]" -func outboundKeyFromMID(id uint16) string { - return fmt.Sprintf("%s%d", outboundPrefix, id) -} - -// govern which outgoing messages are persisted -func persistOutbound(s Store, m packets.ControlPacket) { - switch m.Details().Qos { - case 0: - switch m.(type) { - case *packets.PubackPacket, *packets.PubcompPacket: - // Sending puback. delete matching publish - // from ibound - s.Del(inboundKeyFromMID(m.Details().MessageID)) - } - case 1: - switch m.(type) { - case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket: - // Sending publish. store in obound - // until puback received - s.Put(outboundKeyFromMID(m.Details().MessageID), m) - default: - ERROR.Println(STR, "Asked to persist an invalid message type") - } - case 2: - switch m.(type) { - case *packets.PublishPacket: - // Sending publish. 
store in obound - // until pubrel received - s.Put(outboundKeyFromMID(m.Details().MessageID), m) - default: - ERROR.Println(STR, "Asked to persist an invalid message type") - } - } -} - -// govern which incoming messages are persisted -func persistInbound(s Store, m packets.ControlPacket) { - switch m.Details().Qos { - case 0: - switch m.(type) { - case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket: - // Received a puback. delete matching publish - // from obound - s.Del(outboundKeyFromMID(m.Details().MessageID)) - case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket: - default: - ERROR.Println(STR, "Asked to persist an invalid messages type") - } - case 1: - switch m.(type) { - case *packets.PublishPacket, *packets.PubrelPacket: - // Received a publish. store it in ibound - // until puback sent - s.Put(inboundKeyFromMID(m.Details().MessageID), m) - default: - ERROR.Println(STR, "Asked to persist an invalid messages type") - } - case 2: - switch m.(type) { - case *packets.PublishPacket: - // Received a publish. store it in ibound - // until pubrel received - s.Put(inboundKeyFromMID(m.Details().MessageID), m) - default: - ERROR.Println(STR, "Asked to persist an invalid messages type") - } - } -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/token.go b/vendor/github.com/eclipse/paho.mqtt.golang/token.go deleted file mode 100644 index 996ab5b0..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/token.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. 
- * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Allan Stockdill-Mander - */ - -package mqtt - -import ( - "sync" - "time" - - "github.com/eclipse/paho.mqtt.golang/packets" -) - -// PacketAndToken is a struct that contains both a ControlPacket and a -// Token. This struct is passed via channels between the client interface -// code and the underlying code responsible for sending and receiving -// MQTT messages. -type PacketAndToken struct { - p packets.ControlPacket - t tokenCompletor -} - -// Token defines the interface for the tokens used to indicate when -// actions have completed. -type Token interface { - // Wait will wait indefinitely for the Token to complete, ie the Publish - // to be sent and confirmed receipt from the broker. - Wait() bool - - // WaitTimeout takes a time.Duration to wait for the flow associated with the - // Token to complete, returns true if it returned before the timeout or - // returns false if the timeout occurred. In the case of a timeout the Token - // does not have an error set in case the caller wishes to wait again. - WaitTimeout(time.Duration) bool - - // Done returns a channel that is closed when the flow associated - // with the Token completes. Clients should call Error after the - // channel is closed to check if the flow completed successfully. - // - // Done is provided for use in select statements. Simple use cases may - // use Wait or WaitTimeout. - Done() <-chan struct{} - - Error() error -} - -type TokenErrorSetter interface { - setError(error) -} - -type tokenCompletor interface { - Token - TokenErrorSetter - flowComplete() -} - -type baseToken struct { - m sync.RWMutex - complete chan struct{} - err error -} - -// Wait implements the Token Wait method. 
-func (b *baseToken) Wait() bool { - <-b.complete - return true -} - -// WaitTimeout implements the Token WaitTimeout method. -func (b *baseToken) WaitTimeout(d time.Duration) bool { - timer := time.NewTimer(d) - select { - case <-b.complete: - if !timer.Stop() { - <-timer.C - } - return true - case <-timer.C: - } - - return false -} - -// Done implements the Token Done method. -func (b *baseToken) Done() <-chan struct{} { - return b.complete -} - -func (b *baseToken) flowComplete() { - select { - case <-b.complete: - default: - close(b.complete) - } -} - -func (b *baseToken) Error() error { - b.m.RLock() - defer b.m.RUnlock() - return b.err -} - -func (b *baseToken) setError(e error) { - b.m.Lock() - b.err = e - b.flowComplete() - b.m.Unlock() -} - -func newToken(tType byte) tokenCompletor { - switch tType { - case packets.Connect: - return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}} - case packets.Subscribe: - return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)} - case packets.Publish: - return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}} - case packets.Unsubscribe: - return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}} - case packets.Disconnect: - return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}} - } - return nil -} - -// ConnectToken is an extension of Token containing the extra fields -// required to provide information about calls to Connect() -type ConnectToken struct { - baseToken - returnCode byte - sessionPresent bool -} - -// ReturnCode returns the acknowledgement code in the connack sent -// in response to a Connect() -func (c *ConnectToken) ReturnCode() byte { - c.m.RLock() - defer c.m.RUnlock() - return c.returnCode -} - -// SessionPresent returns a bool representing the value of the -// session present field in the connack sent in response to a Connect() -func (c *ConnectToken) SessionPresent() bool 
{ - c.m.RLock() - defer c.m.RUnlock() - return c.sessionPresent -} - -// PublishToken is an extension of Token containing the extra fields -// required to provide information about calls to Publish() -type PublishToken struct { - baseToken - messageID uint16 -} - -// MessageID returns the MQTT message ID that was assigned to the -// Publish packet when it was sent to the broker -func (p *PublishToken) MessageID() uint16 { - return p.messageID -} - -// SubscribeToken is an extension of Token containing the extra fields -// required to provide information about calls to Subscribe() -type SubscribeToken struct { - baseToken - subs []string - subResult map[string]byte - messageID uint16 -} - -// Result returns a map of topics that were subscribed to along with -// the matching return code from the broker. This is either the Qos -// value of the subscription or an error code. -func (s *SubscribeToken) Result() map[string]byte { - s.m.RLock() - defer s.m.RUnlock() - return s.subResult -} - -// UnsubscribeToken is an extension of Token containing the extra fields -// required to provide information about calls to Unsubscribe() -type UnsubscribeToken struct { - baseToken - messageID uint16 -} - -// DisconnectToken is an extension of Token containing the extra fields -// required to provide information about calls to Disconnect() -type DisconnectToken struct { - baseToken -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go deleted file mode 100644 index 966540ae..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. 
- * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "errors" - "strings" -) - -// ErrInvalidQos is the error returned when an packet is to be sent -// with an invalid Qos value -var ErrInvalidQos = errors.New("invalid QoS") - -// ErrInvalidTopicEmptyString is the error returned when a topic string -// is passed in that is 0 length -var ErrInvalidTopicEmptyString = errors.New("invalid Topic; empty string") - -// ErrInvalidTopicMultilevel is the error returned when a topic string -// is passed in that has the multi level wildcard in any position but -// the last -var ErrInvalidTopicMultilevel = errors.New("invalid Topic; multi-level wildcard must be last level") - -// Topic Names and Topic Filters -// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard -// to the validity of Topic strings. -// - A Topic must be between 1 and 65535 bytes. -// - A Topic is case sensitive. -// - A Topic may contain whitespace. -// - A Topic containing a leading forward slash is different than a Topic without. -// - A Topic may be "/" (two levels, both empty string). -// - A Topic must be UTF-8 encoded. -// - A Topic may contain any number of levels. -// - A Topic may contain an empty level (two forward slashes in a row). -// - A TopicName may not contain a wildcard. -// - A TopicFilter may only have a # (multi-level) wildcard as the last level. -// - A TopicFilter may contain any number of + (single-level) wildcards. -// - A TopicFilter with a # will match the absence of a level -// Example: a subscription to "foo/#" will match messages published to "foo". 
- -func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) { - if len(subs) == 0 { - return nil, nil, errors.New("invalid subscription; subscribe map must not be empty") - } - - var topics []string - var qoss []byte - for topic, qos := range subs { - if err := validateTopicAndQos(topic, qos); err != nil { - return nil, nil, err - } - topics = append(topics, topic) - qoss = append(qoss, qos) - } - - return topics, qoss, nil -} - -func validateTopicAndQos(topic string, qos byte) error { - if len(topic) == 0 { - return ErrInvalidTopicEmptyString - } - - levels := strings.Split(topic, "/") - for i, level := range levels { - if level == "#" && i != len(levels)-1 { - return ErrInvalidTopicMultilevel - } - } - - if qos > 2 { - return ErrInvalidQos - } - return nil -} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go deleted file mode 100644 index b07b6042..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2021 IBM Corp and others. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -type ( - // Logger interface allows implementations to provide to this package any - // object that implements the methods defined in it. - Logger interface { - Println(v ...interface{}) - Printf(format string, v ...interface{}) - } - - // NOOPLogger implements the logger that does not perform any operation - // by default. 
This allows us to efficiently discard the unwanted messages. - NOOPLogger struct{} -) - -func (NOOPLogger) Println(v ...interface{}) {} -func (NOOPLogger) Printf(format string, v ...interface{}) {} - -// Internal levels of library output that are initialised to not print -// anything but can be overridden by programmer -var ( - ERROR Logger = NOOPLogger{} - CRITICAL Logger = NOOPLogger{} - WARN Logger = NOOPLogger{} - DEBUG Logger = NOOPLogger{} -) diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/websocket.go b/vendor/github.com/eclipse/paho.mqtt.golang/websocket.go deleted file mode 100644 index e0f2583e..00000000 --- a/vendor/github.com/eclipse/paho.mqtt.golang/websocket.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - * This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v2.0 - * and Eclipse Distribution License v1.0 which accompany this distribution. - * - * The Eclipse Public License is available at - * https://www.eclipse.org/legal/epl-2.0/ - * and the Eclipse Distribution License is available at - * http://www.eclipse.org/org/documents/edl-v10.php. 
- * - * Contributors: - */ - -package mqtt - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sync" - "time" - - "github.com/gorilla/websocket" -) - -// WebsocketOptions are config options for a websocket dialer -type WebsocketOptions struct { - ReadBufferSize int - WriteBufferSize int - Proxy ProxyFunction -} - -type ProxyFunction func(req *http.Request) (*url.URL, error) - -// NewWebsocket returns a new websocket and returns a net.Conn compatible interface using the gorilla/websocket package -func NewWebsocket(host string, tlsc *tls.Config, timeout time.Duration, requestHeader http.Header, options *WebsocketOptions) (net.Conn, error) { - if timeout == 0 { - timeout = 10 * time.Second - } - - if options == nil { - // Apply default options - options = &WebsocketOptions{} - } - if options.Proxy == nil { - options.Proxy = http.ProxyFromEnvironment - } - dialer := &websocket.Dialer{ - Proxy: options.Proxy, - HandshakeTimeout: timeout, - EnableCompression: false, - TLSClientConfig: tlsc, - Subprotocols: []string{"mqtt"}, - ReadBufferSize: options.ReadBufferSize, - WriteBufferSize: options.WriteBufferSize, - } - - ws, resp, err := dialer.Dial(host, requestHeader) - - if err != nil { - if resp != nil { - WARN.Println(CLI, fmt.Sprintf("Websocket handshake failure. StatusCode: %d. Body: %s", resp.StatusCode, resp.Body)) - } - return nil, err - } - - wrapper := &websocketConnector{ - Conn: ws, - } - return wrapper, err -} - -// websocketConnector is a websocket wrapper so it satisfies the net.Conn interface so it is a -// drop in replacement of the golang.org/x/net/websocket package. 
-// Implementation guide taken from https://github.com/gorilla/websocket/issues/282 -type websocketConnector struct { - *websocket.Conn - r io.Reader - rio sync.Mutex - wio sync.Mutex -} - -// SetDeadline sets both the read and write deadlines -func (c *websocketConnector) SetDeadline(t time.Time) error { - if err := c.SetReadDeadline(t); err != nil { - return err - } - err := c.SetWriteDeadline(t) - return err -} - -// Write writes data to the websocket -func (c *websocketConnector) Write(p []byte) (int, error) { - c.wio.Lock() - defer c.wio.Unlock() - - err := c.WriteMessage(websocket.BinaryMessage, p) - if err != nil { - return 0, err - } - return len(p), nil -} - -// Read reads the current websocket frame -func (c *websocketConnector) Read(p []byte) (int, error) { - c.rio.Lock() - defer c.rio.Unlock() - for { - if c.r == nil { - // Advance to next message. - var err error - _, c.r, err = c.NextReader() - if err != nil { - return 0, err - } - } - n, err := c.r.Read(p) - if err == io.EOF { - // At end of message. - c.r = nil - if n > 0 { - return n, nil - } - // No data read, continue to next message. - continue - } - return n, err - } -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/LICENSE b/vendor/github.com/edgexfoundry/go-mod-core-contracts/LICENSE deleted file mode 100644 index aad1af23..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017 Dell, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/constants.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/constants.go deleted file mode 100644 index fb311f66..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/constants.go +++ /dev/null @@ -1,81 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package clients - -// Do not assume that if a constant is identified by your IDE as not being used within this module that it is not being -// used at all. Any application wishing to exchange information with the EdgeX core services will utilize this module, -// so constants located here may be used externally. -// -// Miscellaneous constants -const ( - ClientMonitorDefault = 15000 // Defaults the interval at which a given service client will refresh its endpoint from the Registry, if used - CorrelationHeader = "correlation-id" // Sets the key of the Correlation ID HTTP header -) - -// Constants related to defined routes in the service APIs -const ( - ApiVersionRoute = "/api/version" - ApiBase = "/api/v1" - ApiAddressableRoute = "/api/v1/addressable" - ApiCallbackRoute = "/api/v1/callback" - ApiCommandRoute = "/api/v1/command" - ApiConfigRoute = "/api/v1/config" - ApiDeviceRoute = "/api/v1/device" - ApiDeviceProfileRoute = "/api/v1/deviceprofile" - ApiDeviceServiceRoute = "/api/v1/deviceservice" - ApiEventRoute = "/api/v1/event" - ApiHealthRoute = "/api/v1/health" - ApiLoggingRoute = "/api/v1/logs" - ApiMetricsRoute = "/api/v1/metrics" - ApiNotificationRoute = "/api/v1/notification" - ApiNotifyRegistrationRoute = "/api/v1/notify/registrations" - ApiOperationRoute = "/api/v1/operation" - ApiPingRoute = "/api/v1/ping" - ApiProvisionWatcherRoute = "/api/v1/provisionwatcher" - ApiReadingRoute = "/api/v1/reading" - ApiRegistrationRoute = "/api/v1/registration" - 
ApiRegistrationByNameRoute = ApiRegistrationRoute + "/name" - ApiSubscriptionRoute = "/api/v1/subscription" - ApiTransmissionRoute = "/api/v1/transmission" - ApiValueDescriptorRoute = "/api/v1/valuedescriptor" - ApiIntervalRoute = "/api/v1/interval" - ApiIntervalActionRoute = "/api/v1/intervalaction" -) - -// Constants related to how services identify themselves in the Service Registry -const ( - ServiceKeyPrefix = "edgex-" - ConfigSeedServiceKey = "edgex-config-seed" - CoreCommandServiceKey = "edgex-core-command" - CoreDataServiceKey = "edgex-core-data" - CoreMetaDataServiceKey = "edgex-core-metadata" - SupportLoggingServiceKey = "edgex-support-logging" - SupportNotificationsServiceKey = "edgex-support-notifications" - SystemManagementAgentServiceKey = "edgex-sys-mgmt-agent" - SupportSchedulerServiceKey = "edgex-support-scheduler" - SecuritySecretStoreSetupServiceKey = "edgex-security-secretstore-setup" - SecuritySecretsSetupServiceKey = "edgex-security-secrets-setup" - SecurityProxySetupServiceKey = "edgex-security-proxy-setup" - SecurityFileTokenProviderServiceKey = "edgex-security-file-token-provider" -) - -// Constants related to the possible content types supported by the APIs -const ( - ContentType = "Content-Type" - ContentTypeCBOR = "application/cbor" - ContentTypeJSON = "application/json" - ContentTypeYAML = "application/x-yaml" - ContentTypeText = "text/plain" -) diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/context.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/context.go deleted file mode 100644 index 6f14a2f1..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/context.go +++ /dev/null @@ -1,29 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package clients - -import ( - "context" -) - -// FromContext allows for the retrieval of the specified key's value from the supplied Context. -// If the value is not found, an empty string is returned. -func FromContext(ctx context.Context, key string) string { - hdr, ok := ctx.Value(key).(string) - if !ok { - hdr = "" - } - return hdr -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/doc.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/doc.go deleted file mode 100644 index 6547b53e..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -/* -Package clients provides REST-based integration with the core APIs of the EdgeX Foundry platform. - -Each individual service client can be found in its respective package within clients. View the Subdirectories section -below for more information. - -While it is certainly possible to utilize the exported functions in this package to make calls to a given service, it -is recommended (unless you really specifically know what you're doing) to use the service clients instead. The functions -here are exported primarily for the use of the service clients. - -*/ -package clients diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/url.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/url.go deleted file mode 100644 index 1391b7a0..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/url.go +++ /dev/null @@ -1,26 +0,0 @@ -/******************************************************************************* - * Copyright 2020 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package interfaces - -// URLClient is the interface for types that need to define some way to retrieve URLClient information about services. 
-// This information can be anything that must be determined at runtime, whether it is unknown or simply not yet known. -type URLClient interface { - // Prefix returns the URLClient base path (or root) of a service. - // This is the common root of all REST calls to the service, - // and is defined on a per service (rather than per endpoint) basis. - // Prefix returns the root URLClient for REST calls to the service if it was able to retrieve that URLClient; - // it returns an error otherwise. - Prefix() (string, error) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/urlstream.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/urlstream.go deleted file mode 100644 index f3718d32..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces/urlstream.go +++ /dev/null @@ -1,17 +0,0 @@ -/******************************************************************************* - * Copyright 2020 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package interfaces - -type URLStream string diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/request.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/request.go deleted file mode 100644 index 6db931d9..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/request.go +++ /dev/null @@ -1,353 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * Copyright 2019 Joan Duran - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package clients - -import ( - "bytes" - "context" - "encoding/json" - "github.com/google/uuid" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "path/filepath" - "strconv" - - "github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces" - "github.com/edgexfoundry/go-mod-core-contracts/clients/types" -) - -// Helper method to get the body from the response after making the request -func getBody(resp *http.Response) ([]byte, error) { - body, err := ioutil.ReadAll(resp.Body) - return body, err -} - -// Helper method to make the request and return the response -func makeRequest(req *http.Request) (*http.Response, error) { - client := &http.Client{} - resp, err := client.Do(req) - - return resp, err -} - -// GetRequest will make a GET request to the specified URL with the root URL retrieved by the URLClient prepended. -// It returns the body as a byte array if successful and an error otherwise. -func GetRequest(ctx context.Context, urlSuffix string, urlClient interfaces.URLClient) ([]byte, error) { - urlPrefix, err := urlClient.Prefix() - if err != nil { - return nil, err - } - - return GetRequestWithURL(ctx, urlPrefix+urlSuffix) -} - -// GetRequestWithURL will make a GET request to the specified URL. -// It returns the body as a byte array if successful and an error otherwise. 
-func GetRequestWithURL(ctx context.Context, url string) ([]byte, error) { - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - - c := NewCorrelatedRequest(ctx, req) - resp, err := makeRequest(c.Request) - if err != nil { - return nil, err - } - if resp == nil { - return nil, types.ErrResponseNil{} - } - defer resp.Body.Close() - - bodyBytes, err := getBody(resp) - if err != nil { - return nil, err - } - - if (resp.StatusCode != http.StatusOK) && (resp.StatusCode != http.StatusAccepted) { - return nil, types.NewErrServiceClient(resp.StatusCode, bodyBytes) - } - - return bodyBytes, nil -} - -// Helper method to make the count request -func CountRequest(ctx context.Context, urlSuffix string, urlClient interfaces.URLClient) (int, error) { - // do not get URLPrefix here since GetRequest does it - data, err := GetRequest(ctx, urlSuffix, urlClient) - if err != nil { - return 0, err - } - - count, err := strconv.Atoi(string(data)) - if err != nil { - return 0, err - } - return count, nil -} - -// Helper method to make the post JSON request and return the body -func PostJSONRequest( - ctx context.Context, - urlSuffix string, - data interface{}, - urlClient interfaces.URLClient) (string, error) { - - jsonStr, err := json.Marshal(data) - if err != nil { - return "", err - } - - ctx = context.WithValue(ctx, ContentType, ContentTypeJSON) - - // do not get URLPrefix here since PostRequest does it - return PostRequest(ctx, urlSuffix, jsonStr, urlClient) -} - -// PostJSONRequestWithURL will make a POST request to the specified URL with the object passed in -// marshaled into a JSON formatted byte array. -// It returns the body on success and an error otherwise. 
-func PostJSONRequestWithURL(ctx context.Context, url string, data interface{}) (string, error) { - jsonStr, err := json.Marshal(data) - if err != nil { - return "", err - } - - ctx = context.WithValue(ctx, ContentType, ContentTypeJSON) - - return PostRequestWithURL(ctx, url, jsonStr) -} - -// Helper method to make the post request and return the body -func PostRequest(ctx context.Context, urlSuffix string, data []byte, urlClient interfaces.URLClient) (string, error) { - urlPrefix, err := urlClient.Prefix() - if err != nil { - return "", err - } - - return PostRequestWithURL(ctx, urlPrefix+urlSuffix, data) -} - -// PostRequestWithURL will make a POST request to the specified URL. -// It returns the body as a byte array if successful and an error otherwise. -func PostRequestWithURL(ctx context.Context, url string, data []byte) (string, error) { - content := FromContext(ctx, ContentType) - if content == "" { - content = ContentTypeJSON - } - - req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(data)) - if err != nil { - return "", err - } - req.Header.Set(ContentType, content) - - c := NewCorrelatedRequest(ctx, req) - resp, err := makeRequest(c.Request) - if err != nil { - return "", err - } - if resp == nil { - return "", types.ErrResponseNil{} - } - defer resp.Body.Close() - - bodyBytes, err := getBody(resp) - if err != nil { - return "", err - } - - if (resp.StatusCode != http.StatusOK) && (resp.StatusCode != http.StatusAccepted) { - return "", types.NewErrServiceClient(resp.StatusCode, bodyBytes) - } - - bodyString := string(bodyBytes) - return bodyString, nil -} - -// Helper method to make a post request in order to upload a file and return the request body -func UploadFileRequest( - ctx context.Context, - urlSuffix string, - filePath string, - urlClient interfaces.URLClient) (string, error) { - - fileContents, err := ioutil.ReadFile(filePath) - if err != nil { - return "", err - } - - // Create multipart/form-data request - body := 
&bytes.Buffer{} - writer := multipart.NewWriter(body) - formFileWriter, err := writer.CreateFormFile("file", filepath.Base(filePath)) - if err != nil { - return "", err - } - _, err = io.Copy(formFileWriter, bytes.NewReader(fileContents)) - if err != nil { - return "", err - } - writer.Close() - - urlPrefix, err := urlClient.Prefix() - if err != nil { - return "", err - } - - req, err := http.NewRequest(http.MethodPost, urlPrefix+urlSuffix, body) - if err != nil { - return "", err - } - req.Header.Add(ContentType, writer.FormDataContentType()) - - c := NewCorrelatedRequest(ctx, req) - resp, err := makeRequest(c.Request) - if err != nil { - return "", err - } - if resp == nil { - return "", types.ErrResponseNil{} - } - defer resp.Body.Close() - - bodyBytes, err := getBody(resp) - if err != nil { - return "", err - } - - if (resp.StatusCode != http.StatusOK) && (resp.StatusCode != http.StatusAccepted) { - return "", types.NewErrServiceClient(resp.StatusCode, bodyBytes) - } - - bodyString := string(bodyBytes) - return bodyString, nil -} - -// Helper method to make the update request -func UpdateRequest(ctx context.Context, urlSuffix string, data interface{}, urlClient interfaces.URLClient) error { - jsonStr, err := json.Marshal(data) - if err != nil { - return err - } - - // do not get URLPrefix here since PutRequest does it - _, err = PutRequest(ctx, urlSuffix, jsonStr, urlClient) - return err -} - -// Helper method to make the put request -func PutRequest(ctx context.Context, urlSuffix string, body []byte, urlClient interfaces.URLClient) (string, error) { - var err error - var req *http.Request - - urlPrefix, err := urlClient.Prefix() - if err != nil { - return "", err - } - if body != nil { - req, err = http.NewRequest(http.MethodPut, urlPrefix+urlSuffix, bytes.NewReader(body)) - if err != nil { - return "", err - } - - content := FromContext(ctx, ContentType) - if content == "" { - content = ContentTypeJSON - } - req.Header.Set(ContentType, content) - } else { - 
req, err = http.NewRequest(http.MethodPut, urlPrefix+urlSuffix, nil) - } - if err != nil { - return "", err - } - - c := NewCorrelatedRequest(ctx, req) - resp, err := makeRequest(c.Request) - if err != nil { - return "", err - } - if resp == nil { - return "", types.ErrResponseNil{} - } - defer resp.Body.Close() - - bodyBytes, err := getBody(resp) - if err != nil { - return "", err - } - - if (resp.StatusCode != http.StatusOK) && (resp.StatusCode != http.StatusAccepted) { - return "", types.NewErrServiceClient(resp.StatusCode, bodyBytes) - } - - bodyString := string(bodyBytes) - return bodyString, nil -} - -// Helper method to make the delete request -func DeleteRequest(ctx context.Context, urlSuffix string, urlClient interfaces.URLClient) error { - urlPrefix, err := urlClient.Prefix() - if err != nil { - return err - } - - req, err := http.NewRequest(http.MethodDelete, urlPrefix+urlSuffix, nil) - if err != nil { - return err - } - - c := NewCorrelatedRequest(ctx, req) - resp, err := makeRequest(c.Request) - if err != nil { - return err - } - if resp == nil { - return types.ErrResponseNil{} - } - defer resp.Body.Close() - - if (resp.StatusCode != http.StatusOK) && (resp.StatusCode != http.StatusAccepted) { - bodyBytes, err := getBody(resp) - if err != nil { - return err - } - - return types.NewErrServiceClient(resp.StatusCode, bodyBytes) - } - - return nil -} - -// CorrelatedRequest is a wrapper type for use in managing Correlation IDs during service to service API calls. -type CorrelatedRequest struct { - *http.Request -} - -// NewCorrelatedRequest will add the Correlation ID header to the supplied request. If no Correlation ID header is -// present in the supplied context, one will be created along with a value. 
-func NewCorrelatedRequest(ctx context.Context, req *http.Request) CorrelatedRequest { - c := CorrelatedRequest{Request: req} - correlation := FromContext(ctx, CorrelationHeader) - if len(correlation) == 0 { - correlation = uuid.New().String() - } - c.Header.Set(CorrelationHeader, correlation) - return c -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/types/errors.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/types/errors.go deleted file mode 100644 index e462ae61..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/clients/types/errors.go +++ /dev/null @@ -1,51 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package types - -import "fmt" - -// ErrNotFound represents an error returned from a service indicating the item being asked for was not found. -type ErrNotFound struct{} - -func (e ErrNotFound) Error() string { - return "Item not found" -} - -// ErrResponseNil represents an error returned from a service indicating the response was unexpectedly empty. -type ErrResponseNil struct{} - -func (e ErrResponseNil) Error() string { - return "Response was nil" -} - -// ErrServiceClient exposes the details of a service's response in a more granular manner. 
This is useful when service A -// calls service B and service A needs to make a decision with regard to how it should respond to its own caller based on -// the error thrown from service B. -type ErrServiceClient struct { - StatusCode int // StatusCode contains the HTTP status code returned from the target service - bodyBytes []byte // bodyBytes contains the response from the target service - errMsg string // errMsg contains the error message to be returned. See the Error() method below. -} - -// NewErrServiceClient returns an instance of the error interface with ErrServiceClient as its implementation. -func NewErrServiceClient(statusCode int, body []byte) error { - e := ErrServiceClient{StatusCode: statusCode, bodyBytes: body} - return e -} - -// Error fulfills the error interface and returns an error message assembled from the state of ErrServiceClient. -func (e ErrServiceClient) Error() string { - return fmt.Sprintf("%d - %s", e.StatusCode, e.bodyBytes) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/action.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/action.go deleted file mode 100644 index 7d78819c..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/action.go +++ /dev/null @@ -1,35 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -// Action describes state related to the capabilities of a device -type Action struct { - Path string `json:"path,omitempty" yaml:"path,omitempty"` // Path used by service for action on a device or sensor - Responses []Response `json:"responses,omitempty" yaml:"responses,omitempty"` // Responses from get or put requests to service - URL string `json:"url,omitempty" yaml:"url,omitempty"` // Url for requests from command service -} - -// String returns a JSON formatted string representation of the Action -func (a Action) String() string { - out, err := json.Marshal(a) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/actiontype.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/actiontype.go deleted file mode 100644 index ef19ef54..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/actiontype.go +++ /dev/null @@ -1,30 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -// ActionType indicates the various types of actions -type ActionType string - -const ( - PROFILE ActionType = "PROFILE" - DEVICE = "DEVICE" - SERVICE = "SERVICE" - SCHEDULE = "SCHEDULE" - SCHEDULEEVENT = "SCHEDULEEVENT" - ADDRESSABLE = "ADDRESSABLE" - VALUEDESCRIPTOR = "VALUEDESCRIPTOR" - PROVISIONWATCHER = "PROVISIONWATCHER" - REPORT = "REPORT" -) diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/addressable.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/addressable.go deleted file mode 100644 index 51018b89..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/addressable.go +++ /dev/null @@ -1,120 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "strconv" - "strings" -) - -// Addressable holds information indicating how to contact a specific endpoint -type Addressable struct { - Timestamps - Id string `json:"id,omitempty"` // ID is a unique identifier for the Addressable, such as a UUID - Name string `json:"name,omitempty"` // Name is a unique name given to the Addressable - Protocol string `json:"protocol,omitempty"` // Protocol for the address (HTTP/TCP) - HTTPMethod string `json:"method,omitempty"` // Method for connecting (i.e. POST) - Address string `json:"address,omitempty"` // Address of the addressable - Port int `json:"port,omitempty,Number"` // Port for the address - Path string `json:"path,omitempty"` // Path for callbacks - Publisher string `json:"publisher,omitempty"` // For message bus protocols - User string `json:"user,omitempty"` // User id for authentication - Password string `json:"password,omitempty"` // Password of the user for authentication for the addressable - Topic string `json:"topic,omitempty"` // Topic for message bus addressables - isValidated bool // internal member used for validation check -} - -type addressableAlias Addressable - -// MarshalJSON implements the Marshaler interface for the Addressable type -// Use custom logic to create the URL and Base URL -func (a Addressable) MarshalJSON() ([]byte, error) { - aux := struct { - addressableAlias - BaseURL string `json:"baseURL,omitempty"` - URL string `json:"url,omitempty"` - }{ - addressableAlias: addressableAlias(a), - } - - if a.Protocol != "" && a.Address != "" { - // Get the base URL - aux.BaseURL = a.GetBaseURL() - - // Get the URL - aux.URL = aux.BaseURL - if a.Publisher == "" && a.Topic != "" { - aux.URL += a.Topic + "/" - } - aux.URL += a.Path - } - - return json.Marshal(aux) -} - -// UnmarshalJSON implements the Unmarshaler interface for the Addressable type -func (a *Addressable) 
UnmarshalJSON(data []byte) error { - var err error - var alias addressableAlias - if err = json.Unmarshal(data, &alias); err != nil { - return err - } - - *a = Addressable(alias) - a.isValidated, err = a.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (a Addressable) Validate() (bool, error) { - if !a.isValidated { - if a.Id == "" && a.Name == "" { - return false, NewErrContractInvalid("Addressable ID and Name are both blank") - } - return true, nil - } - return a.isValidated, nil -} - -// String returns a JSON encoded string representation of the addressable. -func (a Addressable) String() string { - out, err := json.Marshal(a) - if err != nil { - return err.Error() - } - return string(out) -} - -// GetBaseURL returns a base URL consisting of protocol, host and port as a string assembled from the constituent parts of the Addressable -func (a Addressable) GetBaseURL() string { - protocol := strings.ToLower(a.Protocol) - address := a.Address - port := strconv.Itoa(a.Port) - baseUrl := protocol + "://" + address + ":" + port - return baseUrl -} - -// GetCallbackURL returns the callback url for the addressable if all relevant tokens have values. -// If any token is missing, string will be empty. Tokens include protocol, address, port and path. -func (a Addressable) GetCallbackURL() string { - url := "" - if len(a.Protocol) > 0 && len(a.Address) > 0 && a.Port > 0 && len(a.Path) > 0 { - url = a.GetBaseURL() + a.Path - } - - return url -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/adminstate.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/adminstate.go deleted file mode 100644 index 79633093..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/adminstate.go +++ /dev/null @@ -1,69 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" - "strings" -) - -// AdminState controls the range of values which constitute valid administrative states for a device -type AdminState string - -const ( - // Locked : device is locked - // Unlocked : device is unlocked - Locked = "LOCKED" - Unlocked = "UNLOCKED" -) - -// UnmarshalJSON implements the Unmarshaler interface for the enum type -func (as *AdminState) UnmarshalJSON(data []byte) error { - // Extract the string from data. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("AdminState should be a string, got %s", data) - } - - new := AdminState(strings.ToUpper(s)) - *as = new - - return nil -} - -// Validate satisfies the Validator interface -func (as AdminState) Validate() (bool, error) { - _, found := map[string]AdminState{"LOCKED": Locked, "UNLOCKED": Unlocked}[string(as)] - if !found { - return false, NewErrContractInvalid(fmt.Sprintf("invalid AdminState %q", as)) - } - return true, nil -} - -// GetAdminState is called from within the router logic of the core services. 
For example, there are PUT calls -// like the one below from core-metadata which specify their update parameters in the URL -// -// d.HandleFunc("/{"+ID+"}/"+URLADMINSTATE+"/{"+ADMINSTATE+"}", restSetDeviceAdminStateById).Methods(http.MethodPut) -// -// Updates like this should be refactored to pass a body containing the new values instead of via the URL. This -// would allow us to utilize the model validation above and remove the logic from the controller. -// -// This will be removed once work on the following issue begins -- https://github.com/edgexfoundry/edgex-go/issues/1244 -func GetAdminState(as string) (AdminState, bool) { - as = strings.ToUpper(as) - retValue, err := map[string]AdminState{"LOCKED": Locked, "UNLOCKED": Unlocked}[as] - return retValue, err -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/autoevent.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/autoevent.go deleted file mode 100644 index 4de7c6a9..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/autoevent.go +++ /dev/null @@ -1,43 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import "encoding/json" - -// AutoEvent supports auto-generated events sourced from a device service -type AutoEvent struct { - // Frequency indicates how often the specific resource needs to be polled. - // It represents as a duration string. - // The format of this field is to be an unsigned integer followed by a unit which may be "ms", "s", "m" or "h" - // representing milliseconds, seconds, minutes or hours. Eg, "100ms", "24h" - Frequency string `json:"frequency,omitempty"` - // OnChange indicates whether the device service will generate an event only, - // if the reading value is different from the previous one. - // If true, only generate events when readings change - OnChange bool `json:"onChange,omitempty"` - // Resource indicates the name of the resource in the device profile which describes the event to generate - Resource string `json:"resource,omitempty"` -} - -/* - * String function for representing an auto-generated event from a device. - */ -func (a AutoEvent) String() string { - out, err := json.Marshal(a) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/callbackalert.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/callbackalert.go deleted file mode 100644 index f1c736c9..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/callbackalert.go +++ /dev/null @@ -1,37 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -// CallbackAlert indicates an action to take when a callback fires. -type CallbackAlert struct { - ActionType ActionType `json:"type,omitempty"` - Id string `json:"id,omitempty"` -} - -/* - * String function for representing a CallbackAlert - */ -func (ca CallbackAlert) String() string { - out, err := json.Marshal(ca) - if err != nil { - return err.Error() - } - - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/category.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/category.go deleted file mode 100644 index 8800bfa5..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/category.go +++ /dev/null @@ -1,55 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" -) - -// NotificationsCategory controls the range of values which constitute valid categories for notifications -type NotificationsCategory string - -const ( - Security = "SECURITY" - Hwhealth = "HW_HEALTH" - Swhealth = "SW_HEALTH" -) - -// UnmarshalJSON implements the Unmarshaler interface for the type -func (as *NotificationsCategory) UnmarshalJSON(data []byte) error { - // Extract the string from data. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("NotificationsCategory should be a string, got %s", data) - } - - got, err := map[string]NotificationsCategory{"SECURITY": Security, "HW_HEALTH": Hwhealth, "SW_HEALTH": Swhealth}[s] - if !err { - return fmt.Errorf("invalid NotificationsCategory %q", s) - } - *as = got - return nil -} - -// IsNotificationsCategory allows external code to verify whether the supplied string is a valid NotificationsCategory value -func IsNotificationsCategory(as string) bool { - _, err := map[string]NotificationsCategory{"SECURITY": Security, "HW_HEALTH": Hwhealth, "SW_HEALTH": Swhealth}[as] - if !err { - return false - } - return true -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel.go deleted file mode 100644 index 403387aa..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel.go +++ /dev/null @@ -1,35 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - * - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -// Channel supports transmissions and notifications with fields for delivery via email or REST -type Channel struct { - Type ChannelType `json:"type,omitempty"` // Type indicates whether the channel facilitates email or REST - MailAddresses []string `json:"mailAddresses,omitempty"` // MailAddresses contains email addresses - Url string `json:"url,omitempty"` // URL contains a REST API destination -} - -func (c Channel) String() string { - out, err := json.Marshal(c) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel_type.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel_type.go deleted file mode 100644 index ddbbec0a..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/channel_type.go +++ /dev/null @@ -1,53 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" -) - -// ChannelType controls the range of values which constitute valid delivery types for channels -type ChannelType string - -const ( - Rest = "REST" - Email = "EMAIL" -) - -// UnmarshalJSON implements the Unmarshaler interface for the type -func (as *ChannelType) UnmarshalJSON(data []byte) error { - // Extract the string from data. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("ChannelType should be a string, got %s", data) - } - - got, err := map[string]ChannelType{"REST": Rest, "EMAIL": Email}[s] - if !err { - return fmt.Errorf("invalid ChannelType %q", s) - } - *as = got - return nil -} - -func (as ChannelType) Validate() (bool, error) { - _, err := map[string]ChannelType{"REST": Rest, "EMAIL": Email}[string(as)] - if !err { - return false, NewErrContractInvalid(fmt.Sprintf("invalid Channeltype %q", as)) - } - return true, nil -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/command.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/command.go deleted file mode 100644 index be2b5d01..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/command.go +++ /dev/null @@ -1,129 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -// Command defines a specific read/write operation targeting a device -type Command struct { - Timestamps `yaml:",inline"` - Id string `json:"id" yaml:"id,omitempty"` // Id is a unique identifier, such as a UUID - Name string `json:"name" yaml:"name,omitempty"` // Command name (unique on the profile) - Get Get `json:"get" yaml:"get,omitempty"` // Get Command - Put Put `json:"put" yaml:"put,omitempty"` // Put Command - isValidated bool // internal member used for validation check -} - -// MarshalJSON implements the Marshaler interface. Empty strings will be null. 
-func (c Command) MarshalJSON() ([]byte, error) { - test := struct { - Timestamps - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` // Command name (unique on the profile) - Get *Get `json:"get,omitempty"` // Get Command - Put *Put `json:"put,omitempty"` // Put Command - }{ - Timestamps: c.Timestamps, - Id: c.Id, - Name: c.Name, - Get: &c.Get, - Put: &c.Put, - } - - // Make empty structs nil pointers so they aren't marshaled - if reflect.DeepEqual(c.Get, Get{}) { - test.Get = nil - } - if reflect.DeepEqual(c.Put, Put{}) { - test.Put = nil - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the Command type -func (c *Command) UnmarshalJSON(data []byte) error { - var err error - a := new(struct { - Timestamps `json:",inline"` - Id *string `json:"id"` - Name *string `json:"name"` // Command name (unique on the profile) - Get Get `json:"get"` // Get Command - Put Put `json:"put"` // Put Command - }) - - // Error with unmarshaling - if err = json.Unmarshal(data, a); err != nil { - return err - } - - // Check nil fields - if a.Id != nil { - c.Id = *a.Id - } - if a.Name != nil { - c.Name = *a.Name - } - c.Get = a.Get - c.Put = a.Put - c.Timestamps = a.Timestamps - - c.isValidated, err = c.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (c Command) Validate() (bool, error) { - if !c.isValidated { - if c.Name == "" { - return false, NewErrContractInvalid("Name cannot be blank") - } - err := validate(c) - if err != nil { - return false, err - } - return true, nil - } - return c.isValidated, nil -} - -/* - * String() function for formatting - */ -func (c Command) String() string { - out, err := json.Marshal(c) - if err != nil { - return err.Error() - } - return string(out) -} - -// AllAssociatedValueDescriptors will append all the associated value descriptors to the list -// associated by PUT command parameters and PUT/GET command return values -func (c *Command) 
AllAssociatedValueDescriptors(vdNames *map[string]string) { - // Check and add Get value descriptors - if &(c.Get) != nil { - c.Get.AllAssociatedValueDescriptors(vdNames) - } - - // Check and add Put value descriptors - if &(c.Put) != nil { - c.Put.AllAssociatedValueDescriptors(vdNames) - } -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/commandresponse.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/commandresponse.go deleted file mode 100644 index 02a2b4cc..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/commandresponse.go +++ /dev/null @@ -1,73 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" - - "github.com/edgexfoundry/go-mod-core-contracts/clients" -) - -// CommandResponse identifies a specific device along with its supported commands. -type CommandResponse struct { - Id string `json:"id,omitempty"` // Id uniquely identifies the CommandResponse, UUID for example. 
- Name string `json:"name,omitempty"` // Unique name for identifying a device - AdminState AdminState `json:"adminState,omitempty"` // Admin state (locked/unlocked) - OperatingState OperatingState `json:"operatingState,omitempty"` // Operating state (enabled/disabled) - LastConnected int64 `json:"lastConnected,omitempty"` // Time (milliseconds) that the device last provided any feedback or responded to any request - LastReported int64 `json:"lastReported,omitempty"` // Time (milliseconds) that the device reported data to the core microservice - Labels []string `json:"labels,omitempty"` // Other labels applied to the device to help with searching - Location interface{} `json:"location,omitempty"` // Device service specific location (interface{} is an empty interface so it can be anything) - Commands []Command `json:"commands,omitempty"` // Associated Device Profile - Describes the device -} - -/* - * String function for representing a device - */ -func (d CommandResponse) String() string { - out, err := json.Marshal(d) - if err != nil { - return err.Error() - } - return string(out) -} - -/* - * CommandResponseFromDevice will create a CommandResponse struct from the supplied Device struct - */ -func CommandResponseFromDevice(d Device, commands []Command, cmdURL string) CommandResponse { - cmdResp := CommandResponse{ - Id: d.Id, - Name: d.Name, - AdminState: d.AdminState, - OperatingState: d.OperatingState, - LastConnected: d.LastConnected, - LastReported: d.LastReported, - Labels: d.Labels, - Location: d.Location, - Commands: commands, - } - - basePath := fmt.Sprintf("%s%s/%s/command/", cmdURL, clients.ApiDeviceRoute, d.Id) - // TODO: Find a way to encapsulate this within the "Action" struct if possible - for i := 0; i < len(cmdResp.Commands); i++ { - url := basePath + cmdResp.Commands[i].Id - cmdResp.Commands[i].Get.URL = url - cmdResp.Commands[i].Put.URL = url - } - - return cmdResp -} diff --git 
a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/constants.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/constants.go deleted file mode 100644 index 84c90462..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/constants.go +++ /dev/null @@ -1,28 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -// These constants are used by the unit tests in the models. -const ( - // common - testEmptyJSON = "{}" - - // action - testCode = "200" - testDescription = "ok" - testExpectedvalue1 = "temperature" - testExpectedvalue2 = "humidity" - testActionPath = "test/path" -) diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/describedobject.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/describedobject.go deleted file mode 100644 index a04978d6..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/describedobject.go +++ /dev/null @@ -1,34 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import "encoding/json" - -// DescribedObject is a hold-over from the Java conversion and is supposed to represent inheritance whereby a type -// with a Description property IS A DescribedObject. However since there is no inheritance in Go, this should be -// eliminated and the Description property moved to the relevant types. 4 types currently use this. -type DescribedObject struct { - Timestamps `yaml:",inline"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` // Description. Capicé? -} - -// String returns a JSON formatted string representation of this DescribedObject -func (o DescribedObject) String() string { - out, err := json.Marshal(o) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/device.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/device.go deleted file mode 100644 index acd361e6..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/device.go +++ /dev/null @@ -1,170 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -// Device represents a registered device participating within the EdgeX Foundry ecosystem -type Device struct { - DescribedObject - Id string `json:"id"` // ID uniquely identifies the device, a UUID for example - Name string `json:"name"` // Unique name for identifying a device - AdminState AdminState `json:"adminState"` // Admin state (locked/unlocked) - OperatingState OperatingState `json:"operatingState"` // Operating state (enabled/disabled) - Protocols map[string]ProtocolProperties `json:"protocols"` // A map of supported protocols for the given device - LastConnected int64 `json:"lastConnected"` // Time (milliseconds) that the device last provided any feedback or responded to any request - LastReported int64 `json:"lastReported"` // Time (milliseconds) that the device reported data to the core microservice - Labels []string `json:"labels"` // Other labels applied to the device to help with searching - Location interface{} `json:"location"` // Device service specific location (interface{} is an empty interface so it can be anything) - Service DeviceService `json:"service"` // Associated Device Service - One per device - Profile DeviceProfile `json:"profile"` // Associated Device Profile - Describes the device - AutoEvents []AutoEvent `json:"autoEvents"` // A list of auto-generated events coming from the device - isValidated bool // internal member used for validation check -} - -// ProtocolProperties 
contains the device connection information in key/value pair -type ProtocolProperties map[string]string - -// MarshalJSON implements the Marshaler interface in order to make empty strings null -func (d Device) MarshalJSON() ([]byte, error) { - test := struct { - DescribedObject - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - AdminState AdminState `json:"adminState,omitempty"` - OperatingState OperatingState `json:"operatingState,omitempty"` - Protocols map[string]ProtocolProperties `json:"protocols,omitempty"` - LastConnected int64 `json:"lastConnected,omitempty"` - LastReported int64 `json:"lastReported,omitempty"` - Labels []string `json:"labels,omitempty"` - Location interface{} `json:"location,omitempty"` - Service *DeviceService `json:"service,omitempty"` - Profile *DeviceProfile `json:"profile,omitempty"` - AutoEvents []AutoEvent `json:"autoEvents,omitempty"` - }{ - Id: d.Id, - Name: d.Name, - DescribedObject: d.DescribedObject, - AdminState: d.AdminState, - OperatingState: d.OperatingState, - Protocols: d.Protocols, - LastConnected: d.LastConnected, - LastReported: d.LastReported, - Labels: d.Labels, - Location: d.Location, - Service: &d.Service, - Profile: &d.Profile, - AutoEvents: d.AutoEvents, - } - - if reflect.DeepEqual(*test.Service, DeviceService{}) { - test.Service = nil - } - - if reflect.DeepEqual(*test.Profile, DeviceProfile{}) { - test.Profile = nil - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the Device type -func (d *Device) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - DescribedObject `json:",inline"` - Id string `json:"id"` - Name string `json:"name"` - AdminState AdminState `json:"adminState"` - OperatingState OperatingState `json:"operatingState"` - Protocols map[string]ProtocolProperties `json:"protocols"` - LastConnected int64 `json:"lastConnected"` - LastReported int64 `json:"lastReported"` - Labels []string `json:"labels"` - 
Location interface{} `json:"location"` - Service DeviceService `json:"service"` - Profile DeviceProfile `json:"profile"` - AutoEvents []AutoEvent `json:"autoEvents"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - d.Id = a.Id - d.Name = a.Name - d.DescribedObject = a.DescribedObject - d.AdminState = a.AdminState - d.OperatingState = a.OperatingState - d.Protocols = a.Protocols - d.LastConnected = a.LastConnected - d.LastReported = a.LastReported - d.Labels = a.Labels - d.Location = a.Location - d.Service = a.Service - d.Profile = a.Profile - d.AutoEvents = a.AutoEvents - - d.isValidated, err = d.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (d Device) Validate() (bool, error) { - if !d.isValidated { - if d.Id == "" && d.Name == "" { - return false, NewErrContractInvalid("Device ID and Name are both blank") - } - if len(d.Protocols) == 0 { - return false, NewErrContractInvalid("no supporting protocol specified for device") - } - err := validate(d) - if err != nil { - return false, err - } - return true, nil - } - return d.isValidated, nil -} - -/* - * String function for representing a device - */ -func (d Device) String() string { - out, err := json.Marshal(d) - if err != nil { - return err.Error() - } - return string(out) -} - -// AllAssociatedValueDescriptors returns all the associated value descriptors through Put command parameters and Put/Get command return values -func (d *Device) AllAssociatedValueDescriptors(vdNames *[]string) { - // Get the value descriptors with a map (set) where the keys are the value descriptor names - vdNamesMap := map[string]string{} - for _, c := range d.Profile.CoreCommands { - c.AllAssociatedValueDescriptors(&vdNamesMap) - } - - // Add the map keys (value descriptor names) to the list - for vd := range vdNamesMap { - *vdNames = append(*vdNames, vd) - } -} diff --git 
a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceprofile.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceprofile.go deleted file mode 100644 index 8f87b07a..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceprofile.go +++ /dev/null @@ -1,114 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -// DeviceProfile represents the attributes and operational capabilities of a device. It is a template for which -// there can be multiple matching devices within a given system. 
-type DeviceProfile struct { - DescribedObject `yaml:",inline"` - Id string `json:"id,omitempty" yaml:"id,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` // Non-database identifier (must be unique) - Manufacturer string `json:"manufacturer,omitempty" yaml:"manufacturer,omitempty"` // Manufacturer of the device - Model string `json:"model,omitempty" yaml:"model,omitempty"` // Model of the device - Labels []string `json:"labels,omitempty" yaml:"labels,flow,omitempty"` // Labels used to search for groups of profiles - DeviceResources []DeviceResource `json:"deviceResources,omitempty" yaml:"deviceResources,omitempty"` - DeviceCommands []ProfileResource `json:"deviceCommands,omitempty" yaml:"deviceCommands,omitempty"` - CoreCommands []Command `json:"coreCommands,omitempty" yaml:"coreCommands,omitempty"` // List of commands to Get/Put information for devices associated with this profile - isValidated bool // internal member used for validation check -} - -// UnmarshalJSON implements the Unmarshaler interface for the DeviceProfile type -func (dp *DeviceProfile) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - DescribedObject `json:",inline"` - Id *string `json:"id"` - Name *string `json:"name"` - Manufacturer *string `json:"manufacturer"` - Model *string `json:"model"` - Labels []string `json:"labels"` - DeviceResources []DeviceResource `json:"deviceResources"` - DeviceCommands []ProfileResource `json:"deviceCommands"` - CoreCommands []Command `json:"coreCommands"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Check nil fields - if a.Id != nil { - dp.Id = *a.Id - } - if a.Name != nil { - dp.Name = *a.Name - } - if a.Manufacturer != nil { - dp.Manufacturer = *a.Manufacturer - } - if a.Model != nil { - dp.Model = *a.Model - } - dp.DescribedObject = a.DescribedObject - dp.Labels = a.Labels - dp.DeviceResources = a.DeviceResources - dp.DeviceCommands = 
a.DeviceCommands - dp.CoreCommands = a.CoreCommands - - dp.isValidated, err = dp.Validate() - - return err - -} - -// Validate satisfies the Validator interface -func (dp DeviceProfile) Validate() (bool, error) { - if !dp.isValidated { - if dp.Id == "" && dp.Name == "" { - return false, NewErrContractInvalid("Device ID and Name are both blank") - } - // Check if there are duplicate names in the device profile command list - cmds := map[string]int{} - for _, c := range dp.CoreCommands { - if _, ok := cmds[c.Name]; !ok { - cmds[c.Name] = 1 - } else { - return false, NewErrContractInvalid("duplicate names in device profile commands") - } - } - err := validate(dp) - if err != nil { - return false, err - } - return true, nil - } - return dp.isValidated, nil -} - -/* - * To String function for DeviceProfile - */ -func (dp DeviceProfile) String() string { - out, err := json.Marshal(dp) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/devicereport.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/devicereport.go deleted file mode 100644 index 9659af65..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/devicereport.go +++ /dev/null @@ -1,40 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -// Deprecated: DeviceReport isn't utilized and needs to be removed. -type DeviceReport struct { - Timestamps - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` // non-database identifier for a device report - must be unique - Device string `json:"device,omitempty"` // associated device name - should be a valid and unique device name - Action string `json:"action,omitempty"` // associated interval action name - should be a valid and unique interval action name - Expected []string `json:"expected,omitempty"` // array of value descriptor names describing the types of data captured in the report -} - -/* - * To String function for DeviceProfile - */ -func (dr DeviceReport) String() string { - out, err := json.Marshal(dr) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceresource.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceresource.go deleted file mode 100644 index 0f5eb9a9..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceresource.go +++ /dev/null @@ -1,66 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -// DeviceResource represents a value on a device that can be read or written -type DeviceResource struct { - Description string `json:"description" yaml:"description,omitempty"` - Name string `json:"name" yaml:"name,omitempty"` - Tag string `json:"tag" yaml:"tag,omitempty"` - Properties ProfileProperty `json:"properties" yaml:"properties"` - Attributes map[string]string `json:"attributes" yaml:"attributes,omitempty"` -} - -// MarshalJSON implements the Marshaler interface in order to make empty strings null -func (do DeviceResource) MarshalJSON() ([]byte, error) { - test := struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - Tag string `json:"tag,omitempty"` - Properties *ProfileProperty `json:"properties,omitempty"` - Attributes *map[string]string `json:"attributes,omitempty"` - }{ - Description: do.Description, - Name: do.Name, - Tag: do.Tag, - Properties: &do.Properties, - } - - // Empty maps are null - if len(do.Attributes) > 0 { - test.Attributes = &do.Attributes - } - if reflect.DeepEqual(do.Properties, ProfileProperty{}) { - test.Properties = nil - } - - return json.Marshal(test) -} - -/* - * To String function for DeviceResource - */ -func (do DeviceResource) String() string { - out, err := json.Marshal(do) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceservice.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceservice.go deleted file mode 100644 index 058e64a6..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/deviceservice.go +++ /dev/null @@ -1,129 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -// DeviceService represents a service that is responsible for proxying connectivity between a set of devices and the -// EdgeX Foundry core services. -type DeviceService struct { - DescribedObject - Id string `json:"id"` - Name string `json:"name"` // time in milliseconds that the device last provided any feedback or responded to any request - LastConnected int64 `json:"lastConnected"` // time in milliseconds that the device last reported data to the core - LastReported int64 `json:"lastReported"` // operational state - either enabled or disabled - OperatingState OperatingState `json:"operatingState"` // operational state - ether enabled or disableddc - Labels []string `json:"labels"` // tags or other labels applied to the device service for search or other identification needs - Addressable Addressable `json:"addressable"` // address (MQTT topic, HTTP address, serial bus, etc.) 
for reaching the service - AdminState AdminState `json:"adminState"` // Device Service Admin State - isValidated bool // internal member used for validation check -} - -// MarshalJSON implements the Marshaler interface in order to make empty strings null -func (ds DeviceService) MarshalJSON() ([]byte, error) { - test := struct { - DescribedObject `json:",inline"` - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` // time in milliseconds that the device last provided any feedback or responded to any request - LastConnected int64 `json:"lastConnected,omitempty"` // time in milliseconds that the device last reported data to the core - LastReported int64 `json:"lastReported,omitempty"` // operational state - either enabled or disabled - OperatingState OperatingState `json:"operatingState,omitempty"` // operational state - ether enabled or disableddc - Labels []string `json:"labels,omitempty"` // tags or other labels applied to the device service for search or other identification needs - Addressable *Addressable `json:"addressable,omitempty"` // address (MQTT topic, HTTP address, serial bus, etc.) 
for reaching the service - AdminState AdminState `json:"adminState,omitempty"` // Device Service Admin State - }{ - DescribedObject: ds.DescribedObject, - Id: ds.Id, - Name: ds.Name, - LastConnected: ds.LastConnected, - LastReported: ds.LastReported, - OperatingState: ds.OperatingState, - Labels: ds.Labels, - Addressable: &ds.Addressable, - AdminState: ds.AdminState, - } - - if reflect.DeepEqual(*test.Addressable, Addressable{}) { - test.Addressable = nil - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the DeviceService type -func (ds *DeviceService) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - DescribedObject `json:",inline"` - Id string `json:"id"` - Name *string `json:"name"` // time in milliseconds that the device last provided any feedback or responded to any request - LastConnected int64 `json:"lastConnected"` // time in milliseconds that the device last reported data to the core - LastReported int64 `json:"lastReported"` // operational state - either enabled or disabled - OperatingState OperatingState `json:"operatingState"` // operational state - ether enabled or disableddc - Labels []string `json:"labels"` // tags or other labels applied to the device service for search or other identification needs - Addressable Addressable `json:"addressable"` // address (MQTT topic, HTTP address, serial bus, etc.) 
for reaching the service - AdminState AdminState `json:"adminState"` // Device Service Admin State - } - a := Alias{} - - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Set the fields - ds.AdminState = a.AdminState - ds.DescribedObject = a.DescribedObject - ds.LastConnected = a.LastConnected - ds.LastReported = a.LastReported - ds.OperatingState = a.OperatingState - ds.Labels = a.Labels - ds.Addressable = a.Addressable - ds.Id = a.Id - - // Name can be nil - if a.Name != nil { - ds.Name = *a.Name - } - - ds.isValidated, err = ds.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (ds DeviceService) Validate() (bool, error) { - if !ds.isValidated { - if ds.Id == "" && ds.Name == "" { - return false, NewErrContractInvalid("Device Service ID and Name are both blank") - } - return true, nil - } - return ds.isValidated, nil -} - -/* - * To String function for DeviceService - */ -func (ds DeviceService) String() string { - out, err := json.Marshal(ds) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/encryptiondetails.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/encryptiondetails.go deleted file mode 100644 index 99353896..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/encryptiondetails.go +++ /dev/null @@ -1,29 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -// Encryption types -const ( - EncNone = "NONE" - EncAes = "AES" -) - -// EncryptionDetails - Provides details for encryption -// of export data per client request -type EncryptionDetails struct { - Algo string `json:"encryptionAlgorithm,omitempty"` - Key string `json:"encryptionKey,omitempty"` - InitVector string `json:"initializingVector,omitempty"` -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/errors.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/errors.go deleted file mode 100644 index 2271af2d..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/errors.go +++ /dev/null @@ -1,32 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -// ErrContractInvalid is a specific error type for handling model validation failures. Type checking within -// the calling application will facilitate more explicit error handling whereby it's clear that validation -// has failed as opposed to something unexpected happening. -type ErrContractInvalid struct { - errMsg string -} - -// NewErrContractInvalid returns an instance of the error interface with ErrContractInvalid as its implementation. -func NewErrContractInvalid(message string) error { - return ErrContractInvalid{errMsg: message} -} - -// Error fulfills the error interface and returns an error message assembled from the state of ErrContractInvalid. -func (e ErrContractInvalid) Error() string { - return e.errMsg -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/event.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/event.go deleted file mode 100644 index de1dd58a..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/event.go +++ /dev/null @@ -1,108 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - - "github.com/fxamacker/cbor/v2" -) - -// Event represents a single measurable event read from a device -type Event struct { - ID string `json:"id,omitempty" codec:"id,omitempty"` // ID uniquely identifies an event, for example a UUID - Pushed int64 `json:"pushed,omitempty" codec:"pushed,omitempty"` // Pushed is a timestamp indicating when the event was exported. If unexported, the value is zero. - Device string `json:"device,omitempty" codec:"device,omitempty"` // Device identifies the source of the event, can be a device name or id. Usually the device name. - Created int64 `json:"created,omitempty" codec:"created,omitempty"` // Created is a timestamp indicating when the event was created. - Modified int64 `json:"modified,omitempty" codec:"modified,omitempty"` // Modified is a timestamp indicating when the event was last modified. - Origin int64 `json:"origin,omitempty" codec:"origin,omitempty"` // Origin is a timestamp that can communicate the time of the original reading, prior to event creation - Readings []Reading `json:"readings,omitempty" codec:"readings,omitempty"` // Readings will contain zero to many entries for the associated readings of a given event. 
- isValidated bool // internal member used for validation check -} - -func encodeAsCBOR(e Event) ([]byte, error) { - bytes, err := cbor.Marshal(e) - if err != nil { - return []byte{}, err - } - - return bytes, nil -} - -// UnmarshalJSON implements the Unmarshaler interface for the Event type -func (e *Event) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - ID *string `json:"id"` - Pushed int64 `json:"pushed"` - Device *string `json:"device"` - Created int64 `json:"created"` - Modified int64 `json:"modified"` - Origin int64 `json:"origin"` - Readings []Reading `json:"readings"` - } - a := Alias{} - - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Set the fields - if a.ID != nil { - e.ID = *a.ID - } - if a.Device != nil { - e.Device = *a.Device - } - e.Pushed = a.Pushed - e.Created = a.Created - e.Modified = a.Modified - e.Origin = a.Origin - e.Readings = a.Readings - - e.isValidated, err = e.Validate() - return err -} - -// Validate satisfies the Validator interface -func (e Event) Validate() (bool, error) { - if !e.isValidated { - if e.Device == "" { - return false, NewErrContractInvalid("source device for event not specified") - } - } - return true, nil -} - -// String provides a JSON representation of the Event as a string -func (e Event) String() string { - out, err := json.Marshal(e) - if err != nil { - return err.Error() - } - - return string(out) -} - -// CBOR provides a byte array CBOR-encoded representation of the Event -func (e Event) CBOR() []byte { - cbor, err := encodeAsCBOR(e) - if err != nil { - return []byte{} - } - - return cbor -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/filter.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/filter.go deleted file mode 100644 index 32e4b916..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/filter.go +++ /dev/null @@ -1,21 +0,0 @@ 
-/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -// Filter - Specifies the client filters on reading data -type Filter struct { - DeviceIDs []string `json:"deviceIdentifiers,omitempty"` - ValueDescriptorIDs []string `json:"valueDescriptorIdentifiers,omitempty"` -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/get.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/get.go deleted file mode 100644 index 51cf2aae..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/get.go +++ /dev/null @@ -1,44 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import "encoding/json" - -type Get struct { - Action `json:",omitempty" yaml:",inline"` -} - -/* - * To String function for Get Struct - */ -func (g Get) String() string { - out, err := json.Marshal(g) - if err != nil { - return err.Error() - } - return string(out) -} - -// Append the associated value descriptors to the list -func (g *Get) AllAssociatedValueDescriptors(vdNames *map[string]string) { - for _, r := range g.Action.Responses { - for _, ev := range r.ExpectedValues { - // Only add to the map if the value is not there - if _, ok := (*vdNames)[ev]; !ok { - (*vdNames)[ev] = ev - } - } - } -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval.go deleted file mode 100644 index a966b86e..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval.go +++ /dev/null @@ -1,168 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "time" -) - -const ( - frequencyPattern = `^P(\d+Y)?(\d+M)?(\d+D)?(T(\d+H)?(\d+M)?(\d+S)?)?$` - timestampLayout = "20060102T150405" -) - -// Interval a period of time -type Interval struct { - Timestamps Timestamps - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` // non-database identifier for a shcedule (*must be quitue) - Start string `json:"start,omitempty"` // Start time i ISO 8601 format YYYYMMDD'T'HHmmss - End string `json:"end,omitempty"` // Start time i ISO 8601 format YYYYMMDD'T'HHmmss - Frequency string `json:"frequency,omitempty"` // how frequently should the event occur according ISO 8601 - Cron string `json:"cron,omitempty"` // cron styled regular expression indicating how often the action under interval should occur. Use either runOnce, frequency or cron and not all. - RunOnce bool `json:"runOnce,omitempty"` // boolean indicating that this interval runs one time - at the time indicated by the start - isValidated bool // internal member used for validation check -} - -// Custom marshaling to make empty strings null -func (i Interval) MarshalJSON() ([]byte, error) { - test := struct { - Timestamps *Timestamps `json:",omitempty"` - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` // non-database identifier for a schedule (*must be quitue) - Start string `json:"start,omitempty"` // Start time i ISO 8601 format YYYYMMDD'T'HHmmss - End string `json:"end,omitempty"` // Start time i ISO 8601 format YYYYMMDD'T'HHmmss - Frequency string `json:"frequency,omitempty"` // how frequently should the event occur - Cron string `json:"cron,omitempty"` // cron styled regular expression indicating how often the action under schedule should occur. Use either runOnce, frequency or cron and not all. 
- RunOnce bool `json:"runOnce,omitempty"` // boolean indicating that this interval runs one time - at the time indicated by the start - }{ - Timestamps: &i.Timestamps, - ID: i.ID, - Name: i.Name, - Start: i.Start, - End: i.End, - Frequency: i.Frequency, - Cron: i.Cron, - RunOnce: i.RunOnce, - } - - if reflect.DeepEqual(i.Timestamps, Timestamps{}) { - test.Timestamps = nil - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the Interval type -func (i *Interval) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Timestamps Timestamps `json:",omitempty"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Start *string `json:"start,omitempty"` - End *string `json:"end,omitempty"` - Frequency *string `json:"frequency,omitempty"` - Cron *string `json:"cron,omitempty"` - RunOnce bool `json:"runOnce,omitempty"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Nillable fields - if a.ID != nil { - i.ID = *a.ID - } - if a.Name != nil { - i.Name = *a.Name - } - if a.Start != nil { - i.Start = *a.Start - } - if a.End != nil { - i.End = *a.End - } - if a.Frequency != nil { - i.Frequency = *a.Frequency - } - if a.Cron != nil { - i.Cron = *a.Cron - } - i.Timestamps = a.Timestamps - i.RunOnce = a.RunOnce - - i.isValidated, err = i.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (i Interval) Validate() (bool, error) { - if !i.isValidated { - if i.ID == "" && i.Name == "" { - return false, NewErrContractInvalid("Interval ID and Name are both blank") - } - if i.Start != "" { - _, err := time.Parse(timestampLayout, i.Start) - if err != nil { - return false, NewErrContractInvalid(fmt.Sprintf("error parsing Start %v", err)) - } - } - if i.End != "" { - _, err := time.Parse(timestampLayout, i.End) - if err != nil { - return false, NewErrContractInvalid(fmt.Sprintf("error parsing End %v", 
err)) - } - } - if i.Frequency != "" { - /* legacy frequencyPattern */ - matched, _ := regexp.MatchString(frequencyPattern, i.Frequency) - if matched { - if i.Frequency == "P" || i.Frequency == "PT" { - matched = false - } - } - if !matched { - // parse frequency example "1h15m30s10us9ns" - _, err := time.ParseDuration(i.Frequency) - if err != nil { - return false, NewErrContractInvalid(fmt.Sprintf("invalid Interval frequency %s format", i.Frequency)) - } - } - } - err := validate(i) - if err != nil { - return false, err - } - return true, nil - } - return i.isValidated, nil -} - -// String returns a JSON encoded string representation of this Interval -func (i Interval) String() string { - out, err := json.Marshal(i) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval_action.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval_action.go deleted file mode 100644 index 7bb894cb..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/interval_action.go +++ /dev/null @@ -1,153 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "strconv" - "strings" -) - -type IntervalAction struct { - ID string `json:"id,omitempty"` - Created int64 `json:"created,omitempty"` - Modified int64 `json:"modified,omitempty"` - Origin int64 `json:"origin,omitempty"` - Name string `json:"name,omitempty"` - Interval string `json:"interval,omitempty"` - Parameters string `json:"parameters,omitempty"` - Target string `json:"target,omitempty"` - Protocol string `json:"protocol,omitempty"` - HTTPMethod string `json:"httpMethod,omitempty"` - Address string `json:"address,omitempty"` - Port int `json:"port,omitempty"` - Path string `json:"path,omitempty"` - Publisher string `json:"publisher,omitempty"` - User string `json:"user,omitempty"` - Password string `json:"password,omitempty"` - Topic string `json:"topic,omitempty"` - isValidated bool // internal member used for validation check -} - -// UnmarshalJSON implements the Unmarshaler interface for the IntervalAction type -func (ia *IntervalAction) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - ID *string `json:"id"` - Created int64 `json:"created"` - Modified int64 `json:"modified"` - Origin int64 `json:"origin"` - Name *string `json:"name"` - Interval *string `json:"interval"` - Parameters *string `json:"parameters"` - Target *string `json:"target"` - Protocol *string `json:"protocol"` - HTTPMethod *string `json:"httpMethod"` - Address *string `json:"address"` - Port int `json:"port"` - Path *string `json:"path"` - Publisher *string `json:"publisher"` - User *string `json:"user"` - Password *string `json:"password"` - Topic *string `json:"topic"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Nillable fields - if a.ID != nil { - ia.ID = *a.ID - } - if a.Name != nil { - ia.Name = *a.Name - } - if a.Interval != nil { - ia.Interval = 
*a.Interval - } - if a.Parameters != nil { - ia.Parameters = *a.Parameters - } - if a.Target != nil { - ia.Target = *a.Target - } - if a.Protocol != nil { - ia.Protocol = *a.Protocol - } - if a.HTTPMethod != nil { - ia.HTTPMethod = *a.HTTPMethod - } - if a.Address != nil { - ia.Address = *a.Address - } - if a.Path != nil { - ia.Path = *a.Path - } - if a.Publisher != nil { - ia.Publisher = *a.Publisher - } - if a.User != nil { - ia.User = *a.User - } - if a.Password != nil { - ia.Password = *a.Password - } - if a.Topic != nil { - ia.Topic = *a.Topic - } - ia.Created = a.Created - ia.Modified = a.Modified - ia.Origin = a.Origin - ia.Port = a.Port - - ia.isValidated, err = ia.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (ia IntervalAction) Validate() (bool, error) { - if !ia.isValidated { - if ia.ID == "" && ia.Name == "" { - return false, NewErrContractInvalid("IntervalAction ID and Name are both blank") - } - if ia.Target == "" { - return false, NewErrContractInvalid("intervalAction target is blank") - } - if ia.Interval == "" { - return false, NewErrContractInvalid("intervalAction interval is blank") - } - return true, nil - } - return ia.isValidated, nil -} - -func (ia IntervalAction) String() string { - out, err := json.Marshal(ia) - if err != nil { - return err.Error() - } - return string(out) -} - -func (ia IntervalAction) GetBaseURL() string { - protocol := strings.ToLower(ia.Protocol) - address := ia.Address - port := strconv.Itoa(ia.Port) - baseUrl := protocol + "://" + address + ":" + port - return baseUrl -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/log_entry.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/log_entry.go deleted file mode 100644 index 59382251..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/log_entry.go +++ /dev/null @@ -1,86 +0,0 @@ -/******************************************************************************* - * Copyright 
2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" -) - -// These constants identify the log levels in order of increasing severity. -const ( - TraceLog = "TRACE" - DebugLog = "DEBUG" - InfoLog = "INFO" - WarnLog = "WARN" - ErrorLog = "ERROR" -) - -type LogEntry struct { - Level string `bson:"logLevel,omitempty" json:"logLevel"` - Args []interface{} `bson:"args,omitempty" json:"args"` - OriginService string `bson:"originService,omitempty" json:"originService"` - Message string `bson:"message,omitempty" json:"message"` - Created int64 `bson:"created,omitempty" json:"created"` - isValidated bool // internal member used for validation check -} - -// UnmarshalJSON implements the Unmarshaler interface for the LogEntry type -func (le *LogEntry) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Level *string `json:"logLevel,omitempty"` - Args []interface{} `json:"args,omitempty"` - OriginService *string `json:"originService,omitempty"` - Message *string `json:"message,omitempty"` - Created int64 `json:"created,omitempty"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Nillable fields - if a.Level != nil { - le.Level = *a.Level - } - if a.OriginService != nil { - le.OriginService = *a.OriginService - } - if a.Message != nil { 
- le.Message = *a.Message - } - le.Args = a.Args - le.Created = a.Created - - le.isValidated, err = le.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (le LogEntry) Validate() (bool, error) { - if !le.isValidated { - logLevels := []string{TraceLog, DebugLog, InfoLog, WarnLog, ErrorLog} - for _, name := range logLevels { - if name == le.Level { - return true, nil - } - } - return false, NewErrContractInvalid(fmt.Sprintf("Invalid level in LogEntry: %s", le.Level)) - } - return le.isValidated, nil -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/notifications.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/notifications.go deleted file mode 100644 index bc652577..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/notifications.go +++ /dev/null @@ -1,169 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -type Notification struct { - Timestamps - ID string `json:"id,omitempty"` - Slug string `json:"slug,omitempty"` - Sender string `json:"sender,omitempty"` - Category NotificationsCategory `json:"category,omitempty"` - Severity NotificationsSeverity `json:"severity,omitempty"` - Content string `json:"content,omitempty"` - Description string `json:"description,omitempty"` - Status NotificationsStatus `json:"status,omitempty"` - Labels []string `json:"labels,omitempty"` - ContentType string `json:"contenttype,omitempty"` - isValidated bool // internal member used for validation check -} - -func (n Notification) MarshalJSON() ([]byte, error) { - test := struct { - *Timestamps `json:",omitempty"` - ID string `json:"id,omitempty"` - Slug string `json:"slug,omitempty"` - Sender string `json:"sender,omitempty"` - Category NotificationsCategory `json:"category,omitempty"` - Severity NotificationsSeverity `json:"severity,omitempty"` - Content string `json:"content,omitempty"` - Description string `json:"description,omitempty"` - Status NotificationsStatus `json:"status,omitempty"` - Labels []string `json:"labels,omitempty"` - ContentType string `json:"contenttype,omitempty"` - }{ - Timestamps: &n.Timestamps, - ID: n.ID, - Slug: n.Slug, - Sender: n.Sender, - Category: n.Category, - Severity: n.Severity, - Content: n.Content, - Description: n.Description, - Status: n.Status, - Labels: n.Labels, - ContentType: n.ContentType, - } - - if reflect.DeepEqual(n.Timestamps, Timestamps{}) { - test.Timestamps = nil - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the Notification type -func (n *Notification) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Timestamps - ID *string `json:"id"` - Slug *string `json:"slug,omitempty,omitempty"` - Sender *string 
`json:"sender,omitempty"` - Category NotificationsCategory `json:"category,omitempty"` - Severity NotificationsSeverity `json:"severity,omitempty"` - Content *string `json:"content,omitempty"` - Description *string `json:"description,omitempty"` - Status NotificationsStatus `json:"status,omitempty"` - Labels []string `json:"labels,omitempty"` - ContentType *string `json:"contenttype,omitempty"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Nillable fields - if a.ID != nil { - n.ID = *a.ID - } - if a.Slug != nil { - n.Slug = *a.Slug - } - if a.Sender != nil { - n.Sender = *a.Sender - } - if a.Content != nil { - n.Content = *a.Content - } - if a.Description != nil { - n.Description = *a.Description - } - if a.ContentType != nil { - n.ContentType = *a.ContentType - } - n.Timestamps = a.Timestamps - n.Category = a.Category - n.Severity = a.Severity - n.Status = a.Status - n.Labels = a.Labels - - n.isValidated, err = n.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (n Notification) Validate() (bool, error) { - if !n.isValidated { - if n.ID == "" && n.Slug == "" { - return false, NewErrContractInvalid("Notifiaction ID and Slug are both blank") - } - if n.Sender == "" { - return false, NewErrContractInvalid("Sender is empty") - } - if n.Content == "" { - return false, NewErrContractInvalid("Content is empty") - } - if n.Category == "" { - return false, NewErrContractInvalid("Category is empty") - } - if n.Severity == "" { - return false, NewErrContractInvalid("Severity is empty") - } - if n.Severity != "" && n.Severity != "CRITICAL" && n.Severity != "NORMAL" { - return false, NewErrContractInvalid("Invalid notification severity") - } - if n.Category != "" && n.Category != "SECURITY" && n.Category != "HW_HEALTH" && n.Category != "SW_HEALTH" { - return false, NewErrContractInvalid("Invalid notification severity") - } - if n.Status != "" && n.Status != "NEW" && 
n.Status != "PROCESSED" && n.Status != "ESCALATED" { - return false, NewErrContractInvalid("Invalid notification severity") - } - err := validate(n) - if err != nil { - return false, err - } - return true, nil - } - return n.isValidated, nil -} - -/* - * To String function for Notification Struct - */ -func (n Notification) String() string { - out, err := json.Marshal(n) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/operatingstate.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/operatingstate.go deleted file mode 100644 index 96a089a2..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/operatingstate.go +++ /dev/null @@ -1,71 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" - "strings" -) - -// OperatingState Constant String -type OperatingState string - -/* - Enabled : ENABLED - Disabled : DISABLED -*/ -const ( - Enabled = "ENABLED" - Disabled = "DISABLED" -) - -// UnmarshalJSON : Struct into json -func (os *OperatingState) UnmarshalJSON(data []byte) error { - // Extract the string from data. 
- var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("OperatingState should be a string, got %s", data) - } - - new := OperatingState(strings.ToUpper(s)) - *os = new - - return nil -} - -// Validate satisfies the Validator interface -func (os OperatingState) Validate() (bool, error) { - _, found := map[string]OperatingState{"ENABLED": Enabled, "DISABLED": Disabled}[string(os)] - if !found { - return false, NewErrContractInvalid(fmt.Sprintf("invalid OperatingState %q", os)) - } - return true, nil -} - -// GetOperatingState is called from within the router logic of the core services. For example, there are PUT calls -// like the one below from core-metadata which specify their update parameters in the URL -// -// d.HandleFunc("/{"+ID+"}/"+OPSTATE+"/{"+OPSTATE+"}", restSetDeviceOpStateById).Methods(http.MethodPut) -// -// Updates like this should be refactored to pass a body containing the new values instead of via the URL. This -// would allow us to utilize the model validation above and remove the logic from the controller. -// -// This will be removed once work on the following issue begins -- https://github.com/edgexfoundry/edgex-go/issues/1244 -func GetOperatingState(os string) (OperatingState, bool) { - os = strings.ToUpper(os) - o, err := map[string]OperatingState{"ENABLED": Enabled, "DISABLED": Disabled}[os] - return o, err -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileproperty.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileproperty.go deleted file mode 100644 index 62b07d64..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileproperty.go +++ /dev/null @@ -1,54 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -type ProfileProperty struct { - Value PropertyValue `json:"value"` - Units Units `json:"units"` -} - -// MarshalJSON implements the Marshaler interface -func (pp ProfileProperty) MarshalJSON() ([]byte, error) { - test := struct { - Value *PropertyValue `json:"value,omitempty"` - Units *Units `json:"units,omitempty"` - }{ - Value: &pp.Value, - Units: &pp.Units, - } - - if reflect.DeepEqual(pp.Value, PropertyValue{}) { - test.Value = nil - } - if reflect.DeepEqual(pp.Units, Units{}) { - test.Units = nil - } - - return json.Marshal(test) -} - -// String returns a JSON encoded string representation of this ProfileProperty -func (pp ProfileProperty) String() string { - out, err := json.Marshal(pp) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileresource.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileresource.go deleted file mode 100644 index 8db1582b..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/profileresource.go +++ /dev/null @@ -1,32 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import "encoding/json" - -type ProfileResource struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Get []ResourceOperation `json:"get,omitempty" yaml:"get,omitempty"` - Set []ResourceOperation `json:"set,omitempty" yaml:"set,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (pr ProfileResource) String() string { - out, err := json.Marshal(pr) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/propertyvalue.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/propertyvalue.go deleted file mode 100644 index c811baf9..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/propertyvalue.go +++ /dev/null @@ -1,53 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -const ( - // Base64Encoding : the float value is represented in Base64 encoding - Base64Encoding = "Base64" - // ENotation : the float value is represented in eNotation - ENotation = "eNotation" -) - -type PropertyValue struct { - Type string `json:"type,omitempty" yaml:"type,omitempty"` // ValueDescriptor Type of property after transformations - ReadWrite string `json:"readWrite,omitempty" yaml:"readWrite,omitempty"` // Read/Write Permissions set for this property - Minimum string `json:"minimum,omitempty" yaml:"minimum,omitempty"` // Minimum value that can be get/set from this property - Maximum string `json:"maximum,omitempty" yaml:"maximum,omitempty"` // Maximum value that can be get/set from this property - DefaultValue string `json:"defaultValue,omitempty" yaml:"defaultValue,omitempty"` // Default value set to this property if no argument is passed - Size string `json:"size,omitempty" yaml:"size,omitempty"` // Size of this property in its type (i.e. bytes for numeric types, characters for string types) - Mask string `json:"mask,omitempty" yaml:"mask,omitempty"` // Mask to be applied prior to get/set of property - Shift string `json:"shift,omitempty" yaml:"shift,omitempty"` // Shift to be applied after masking, prior to get/set of property - Scale string `json:"scale,omitempty" yaml:"scale,omitempty"` // Multiplicative factor to be applied after shifting, prior to get/set of property - Offset string `json:"offset,omitempty" yaml:"offset,omitempty"` // Additive factor to be applied after multiplying, prior to get/set of property - Base string `json:"base,omitempty" yaml:"base,omitempty"` // Base for property to be applied to, leave 0 for no power operation (i.e. base ^ property: 2 ^ 10) - Assertion string `json:"assertion,omitempty" yaml:"assertion,omitempty"` // Required value of the property, set for checking error state. 
Failing an assertion condition will mark the device with an error state - Precision string `json:"precision,omitempty" yaml:"precision,omitempty"` - FloatEncoding string `json:"floatEncoding,omitempty" yaml:"floatEncoding,omitempty"` // FloatEncoding indicates the representation of floating value of reading. It should be 'Base64' or 'eNotation' - MediaType string `json:"mediaType,omitempty" yaml:"mediaType,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (pv PropertyValue) String() string { - out, err := json.Marshal(pv) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/provisionwatcher.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/provisionwatcher.go deleted file mode 100644 index 81fa8fe2..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/provisionwatcher.go +++ /dev/null @@ -1,135 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -type ProvisionWatcher struct { - Timestamps - Id string `json:"id"` - Name string `json:"name"` // unique name and identifier of the provision watcher - Identifiers map[string]string `json:"identifiers"` // set of key value pairs that identify property (MAC, HTTP,...) and value to watch for (00-05-1B-A1-99-99, 10.0.0.1,...) - BlockingIdentifiers map[string][]string `json:"blockingidentifiers"` // set of key-values pairs that identify devices which will not be added despite matching on Identifiers - Profile DeviceProfile `json:"profile"` // device profile that should be applied to the devices available at the identifier addresses - Service DeviceService `json:"service"` // device service that new devices will be associated to - AdminState AdminState `json:"adminState"` // administrative state for new devices - either unlocked or locked - OperatingState OperatingState `validate:"-"` // Deprecated: exists for historical compatibility and will be ignored - isValidated bool `` // internal member used for validation check -} - -// MarshalJSON returns a JSON encoded byte representation of the model -func (pw ProvisionWatcher) MarshalJSON() ([]byte, error) { - test := struct { - Timestamps - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` // unique name and identifier of the addressable - Identifiers *map[string]string `json:"identifiers,omitempty"` // set of key value pairs that identify property (MAC, HTTP,...) and value to watch for (00-05-1B-A1-99-99, 10.0.0.1,...) 
- BlockingIdentifiers *map[string][]string `json:"blockingidentifiers,omitempty"` // set of key-values pairs that identify devices which will not be added despite matching on Identifiers - Profile *DeviceProfile `json:"profile,omitempty"` // device profile that should be applied to the devices available at the identifier addresses - Service *DeviceService `json:"service,omitempty"` // device service that new devices will be associated to - AdminState AdminState `json:"adminState,omitempty"` // administrative state for new devices - either unlocked or locked - }{ - Timestamps: pw.Timestamps, - Id: pw.Id, - Name: pw.Name, - Identifiers: &pw.Identifiers, - BlockingIdentifiers: &pw.BlockingIdentifiers, - Profile: &pw.Profile, - Service: &pw.Service, - AdminState: pw.AdminState, - } - - // Empty maps are null - if len(pw.Identifiers) == 0 { - test.Identifiers = nil - } - if len(pw.BlockingIdentifiers) == 0 { - test.BlockingIdentifiers = nil - } - - // Empty objects are nil - if reflect.DeepEqual(pw.Profile, DeviceProfile{}) { - test.Profile = nil - } - if reflect.DeepEqual(pw.Service, DeviceService{}) { - test.Service = nil - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the ProvisionWatcher type -func (pw *ProvisionWatcher) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Timestamps `json:",inline"` - Id string `json:"id"` - Name *string `json:"name"` - Identifiers map[string]string `json:"identifiers"` - BlockingIdentifiers map[string][]string `json:"blockingidentifiers"` - Profile DeviceProfile `json:"profile"` - Service DeviceService `json:"service"` - AdminState AdminState `json:"adminState"` - } - a := Alias{} - - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Name can be nil - if a.Name != nil { - pw.Name = *a.Name - } - pw.Timestamps = a.Timestamps - pw.Id = a.Id - pw.Identifiers = a.Identifiers - pw.BlockingIdentifiers = 
a.BlockingIdentifiers - pw.Profile = a.Profile - pw.Service = a.Service - pw.AdminState = a.AdminState - - pw.isValidated, err = pw.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (pw ProvisionWatcher) Validate() (bool, error) { - if !pw.isValidated { - if pw.Name == "" { - return false, NewErrContractInvalid("provision watcher name is blank") - } - err := validate(pw) - if err != nil { - return false, err - } - return true, nil - } - return pw.isValidated, nil -} - -// String returns a JSON encoded string representation of the model -func (pw ProvisionWatcher) String() string { - out, err := json.Marshal(pw) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/put.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/put.go deleted file mode 100644 index 4f5ab01f..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/put.go +++ /dev/null @@ -1,42 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import "encoding/json" - -// Put models a put command in EdgeX -type Put struct { - Action `yaml:",inline"` - ParameterNames []string `json:"parameterNames,omitempty" yaml:"parameterNames,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (p Put) String() string { - out, err := json.Marshal(p) - if err != nil { - return err.Error() - } - return string(out) -} - -// Append the associated value descriptors to the list -func (p *Put) AllAssociatedValueDescriptors(vdNames *map[string]string) { - for _, pn := range p.ParameterNames { - // Only add to the map if the value descriptor is NOT there - if _, ok := (*vdNames)[pn]; !ok { - (*vdNames)[pn] = pn - } - } -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/reading.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/reading.go deleted file mode 100644 index c3f590d4..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/reading.go +++ /dev/null @@ -1,178 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "strings" -) - -// Constants related to Reading ValueTypes -const ( - ValueTypeBool = "Bool" - ValueTypeString = "String" - ValueTypeUint8 = "Uint8" - ValueTypeUint16 = "Uint16" - ValueTypeUint32 = "Uint32" - ValueTypeUint64 = "Uint64" - ValueTypeInt8 = "Int8" - ValueTypeInt16 = "Int16" - ValueTypeInt32 = "Int32" - ValueTypeInt64 = "Int64" - ValueTypeFloat32 = "Float32" - ValueTypeFloat64 = "Float64" - ValueTypeBinary = "Binary" - ValueTypeBoolArray = "BoolArray" - ValueTypeStringArray = "StringArray" - ValueTypeUint8Array = "Uint8Array" - ValueTypeUint16Array = "Uint16Array" - ValueTypeUint32Array = "Uint32Array" - ValueTypeUint64Array = "Uint64Array" - ValueTypeInt8Array = "Int8Array" - ValueTypeInt16Array = "Int16Array" - ValueTypeInt32Array = "Int32Array" - ValueTypeInt64Array = "Int64Array" - ValueTypeFloat32Array = "Float32Array" - ValueTypeFloat64Array = "Float64Array" -) - -// Reading contains data that was gathered from a device. -// -// NOTE a Reading's BinaryValue is not to be persisted in the database. This architectural decision requires that -// serialization validation be relaxed for enforcing the presence of binary data for Binary ValueTypes. Also, that -// issuing GET operations to obtain Readings directly or indirectly via Events will result in a Reading with no -// BinaryValue for Readings with a ValueType of Binary. BinaryValue is to be present when creating or updating a Reading -// either directly, indirectly via an Event, and when the information is put on the EventBus. 
-type Reading struct { - Id string `json:"id,omitempty" codec:"id,omitempty"` - Pushed int64 `json:"pushed,omitempty" codec:"pushed,omitempty"` // When the data was pushed out of EdgeX (0 - not pushed yet) - Created int64 `json:"created,omitempty" codec:"created,omitempty"` // When the reading was created - Origin int64 `json:"origin,omitempty" codec:"origin,omitempty"` - Modified int64 `json:"modified,omitempty" codec:"modified,omitempty"` - Device string `json:"device,omitempty" codec:"device,omitempty"` - Name string `json:"name,omitempty" codec:"name,omitempty"` - Value string `json:"value,omitempty" codec:"value,omitempty"` // Device sensor data value - ValueType string `json:"valueType,omitempty" codec:"valueType,omitempty"` - FloatEncoding string `json:"floatEncoding,omitempty" codec:"floatEncoding,omitempty"` - // BinaryValue binary data payload. This information is not persisted in the Database and is expected to be empty - // when retrieving a Reading for the ValueType of Binary. 
- BinaryValue []byte `json:"binaryValue,omitempty" codec:"binaryValue,omitempty"` - MediaType string `json:"mediaType,omitempty" codec:"mediaType,omitempty"` - isValidated bool // internal member used for validation check -} - -// UnmarshalJSON implements the Unmarshaler interface for the Reading type -func (r *Reading) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Id *string `json:"id"` - Pushed int64 `json:"pushed"` - Created int64 `json:"created"` - Origin int64 `json:"origin"` - Modified int64 `json:"modified"` - Device *string `json:"device"` - Name *string `json:"name"` - Value *string `json:"value"` - ValueType *string `json:"valueType"` - FloatEncoding *string `json:"floatEncoding"` - BinaryValue []byte `json:"binaryValue"` - MediaType *string `json:"mediaType"` - } - a := Alias{} - - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Set the fields - if a.Id != nil { - r.Id = *a.Id - } - if a.Device != nil { - r.Device = *a.Device - } - if a.Name != nil { - r.Name = *a.Name - } - if a.Value != nil { - r.Value = *a.Value - } - if a.ValueType != nil { - r.ValueType = normalizeValueTypeCase(*a.ValueType) - } - if a.FloatEncoding != nil { - r.FloatEncoding = *a.FloatEncoding - } - if a.MediaType != nil { - r.MediaType = *a.MediaType - } - r.Pushed = a.Pushed - r.Created = a.Created - r.Origin = a.Origin - r.Modified = a.Modified - r.BinaryValue = a.BinaryValue - - r.isValidated, err = r.Validate() - return err -} - -// Validate satisfies the Validator interface -func (r Reading) Validate() (bool, error) { - // Shortcut if Reading has already been validated - if r.isValidated { - return true, nil - } - - if r.Name == "" { - return false, NewErrContractInvalid("name for reading's value descriptor not specified") - } - // We do not expect the BinaryValue to always be present. This is due to an architectural decision to not persist - // Binary readings to save on memory. 
This means that the BinaryValue is only expected to be populated when creating - // a new reading or event. Otherwise the value will be empty as it will be coming from the database where we are - // explicitly not storing the information. - if r.ValueType != ValueTypeBinary && r.Value == "" { - return false, NewErrContractInvalid("reading has no value") - } - - // Even though we do not want to enforce the BinaryValue always being present for Readings, we still want to enforce - // the MediaType being specified when the BinaryValue is provided. This will most likely only take affect when - // creating and updating events or readings. - if len(r.BinaryValue) != 0 && len(r.MediaType) == 0 { - return false, NewErrContractInvalid("media type must be specified for binary values") - } - - if (r.ValueType == ValueTypeFloat32 || r.ValueType == ValueTypeFloat64) && len(r.FloatEncoding) == 0 { - return false, NewErrContractInvalid("float encoding must be specified for float values") - } - return true, nil -} - -// normalizeValueTypeCase normalize the reading's valueType to upper camel case -func normalizeValueTypeCase(valueType string) string { - normalized := strings.Title(strings.ToLower(valueType)) - normalized = strings.ReplaceAll(normalized, "array", "Array") - return normalized -} - -// String returns a JSON encoded string representation of the model -func (r Reading) String() string { - out, err := json.Marshal(r) - if err != nil { - return err.Error() - } - - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/resourceoperation.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/resourceoperation.go deleted file mode 100644 index f4dfcfec..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/resourceoperation.go +++ /dev/null @@ -1,151 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import "encoding/json" - -type ResourceOperation struct { - Index string `json:"index" yaml:"index,omitempty"` - Operation string `json:"operation" yaml:"operation,omitempty"` - Object string `json:"object" yaml:"object,omitempty"` // Deprecated - DeviceResource string `json:"deviceResource" yaml:"deviceResource,omitempty"` // The replacement of Object field - Parameter string `json:"parameter" yaml:"parameter,omitempty"` - Resource string `json:"resource" yaml:"resource,omitempty"` // Deprecated - DeviceCommand string `json:"deviceCommand" yaml:"deviceCommand,omitempty"` // The replacement of Resource field - Secondary []string `json:"secondary" yaml:"secondary,omitempty"` - Mappings map[string]string `json:"mappings" yaml:"mappings,omitempty"` - isValidated bool // internal member used for validation check -} - -// MarshalJSON returns a JSON encoded byte representation of the model and performs custom autofill -func (ro ResourceOperation) MarshalJSON() ([]byte, error) { - test := struct { - Index string `json:"index,omitempty"` - Operation string `json:"operation,omitempty"` - Object string `json:"object,omitempty"` - DeviceResource string `json:"deviceResource,omitempty"` - Parameter string `json:"parameter,omitempty"` - Resource string `json:"resource,omitempty"` - DeviceCommand string 
`json:"deviceCommand,omitempty"` - Secondary []string `json:"secondary,omitempty"` - Mappings *map[string]string `json:"mappings,omitempty"` - }{ - Index: ro.Index, - Operation: ro.Operation, - Object: ro.Object, - DeviceResource: ro.DeviceResource, - Parameter: ro.Parameter, - Resource: ro.Resource, - DeviceCommand: ro.DeviceCommand, - Secondary: ro.Secondary, - Mappings: &ro.Mappings, - } - - // Empty maps are nil - if len(ro.Mappings) == 0 { - test.Mappings = nil - } - - if ro.DeviceResource != "" { - test.Object = ro.DeviceResource - } else if ro.Object != "" { - test.Object = ro.Object - test.DeviceResource = ro.Object - } - - if ro.DeviceCommand != "" { - test.Resource = ro.DeviceCommand - } else if ro.Resource != "" { - test.DeviceCommand = ro.Resource - } - - return json.Marshal(test) -} - -// UnmarshalJSON implements the Unmarshaler interface for the ResourceOperation type -func (ro *ResourceOperation) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Index *string `json:"index"` - Operation *string `json:"operation"` - Object *string `json:"object"` - DeviceResource *string `json:"deviceResource"` - Parameter *string `json:"parameter"` - Resource *string `json:"resource"` - DeviceCommand *string `json:"deviceCommand"` - Secondary []string `json:"secondary"` - Mappings map[string]string `json:"mappings"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Check nil fields - if a.Index != nil { - ro.Index = *a.Index - } - if a.Operation != nil { - ro.Operation = *a.Operation - } - if a.DeviceResource != nil { - ro.DeviceResource = *a.DeviceResource - ro.Object = *a.DeviceResource - } else if a.Object != nil { - ro.Object = *a.Object - ro.DeviceResource = *a.Object - } - if a.Parameter != nil { - ro.Parameter = *a.Parameter - } - if a.DeviceCommand != nil { - ro.DeviceCommand = *a.DeviceCommand - ro.Resource = *a.DeviceCommand - } else if a.Resource != nil { - 
ro.Resource = *a.Resource - ro.DeviceCommand = *a.Resource - } - ro.Secondary = a.Secondary - ro.Mappings = a.Mappings - - ro.isValidated, err = ro.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (ro ResourceOperation) Validate() (bool, error) { - if !ro.isValidated { - if ro.Object == "" && ro.DeviceResource == "" { - return false, NewErrContractInvalid("Object and DeviceResource are both blank") - } - err := validate(ro) - if err != nil { - return false, err - } - return true, nil - } - return ro.isValidated, nil -} - -// String returns a JSON encoded string representation of the model -func (ro ResourceOperation) String() string { - out, err := json.Marshal(ro) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/response.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/response.go deleted file mode 100644 index ed55c702..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/response.go +++ /dev/null @@ -1,54 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" - "strings" -) - -// Response for a Get or Put request to a service -type Response struct { - Code string `json:"code,omitempty" yaml:"code,omitempty"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` - ExpectedValues []string `json:"expectedValues,omitempty" yaml:"expectedValues,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (r Response) String() string { - out, err := json.Marshal(r) - if err != nil { - return err.Error() - } - return string(out) -} - -func (r Response) Equals(r2 Response) bool { - if strings.Compare(r.Code, r2.Code) != 0 { - return false - } - if strings.Compare(r.Description, r2.Description) != 0 { - return false - } - if len(r.ExpectedValues) != len(r2.ExpectedValues) { - return false - } - if !reflect.DeepEqual(r.ExpectedValues, r2.ExpectedValues) { - return false - } - return true - -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/severity.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/severity.go deleted file mode 100644 index 22ba5b34..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/severity.go +++ /dev/null @@ -1,51 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" -) - -type NotificationsSeverity string - -const ( - Critical = "CRITICAL" - Normal = "NORMAL" -) - -func (as *NotificationsSeverity) UnmarshalJSON(data []byte) error { - // Extract the string from data. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("NotificationsSeverity should be a string, got %s", data) - } - - got, err := map[string]NotificationsSeverity{"CRITICAL": Critical, "NORMAL": Normal}[s] - if !err { - return fmt.Errorf("invalid NotificationsSeverity %q", s) - } - *as = got - return nil -} - -func IsNotificationsSeverity(as string) bool { - _, err := map[string]NotificationsSeverity{"CRITICAL": Critical, "NORMAL": Normal}[as] - if !err { - return false - } - return true -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/sma_operation.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/sma_operation.go deleted file mode 100644 index 8951473a..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/sma_operation.go +++ /dev/null @@ -1,64 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- * - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -/* - * An Operation for SMA processing. - * - * - * Operation struct - */ -type Operation struct { - Action string `bson:"action" json:"action,omitempty"` - Services []string `bson:"services,omitempty" json:"services,omitempty"` -} - -//Implements unmarshaling of JSON string to Operation type instance -func (o *Operation) UnmarshalJSON(data []byte) error { - test := struct { - Action *string `json:"action"` - Services []string `json:"services"` - }{} - - //Verify that incoming string will unmarshal successfully - if err := json.Unmarshal(data, &test); err != nil { - return err - } - - //If so, copy the fields - if test.Action != nil { - o.Action = *test.Action - } - - o.Services = []string{} - if len(test.Services) > 0 { - o.Services = test.Services - } - return nil -} - -// String returns a JSON encoded string representation of the model -func (o Operation) String() string { - out, err := json.Marshal(o) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/status.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/status.go deleted file mode 100644 index 6ac808b2..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/status.go +++ /dev/null @@ -1,52 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" -) - -type NotificationsStatus string - -const ( - New = "NEW" - Processed = "PROCESSED" - Escalated = "ESCALATED" -) - -func (as *NotificationsStatus) UnmarshalJSON(data []byte) error { - // Extract the string from data. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("NotificationsStatus should be a string, got %s", data) - } - - got, err := map[string]NotificationsStatus{"NEW": New, "PROCESSED": Processed, "ESCALATED": Escalated}[s] - if !err { - return fmt.Errorf("invalid NotificationsStatus %q", s) - } - *as = got - return nil -} - -func IsNotificationsStatus(as string) bool { - _, err := map[string]NotificationsStatus{"NEW": New, "PROCESSED": Processed, "ESCALATED": Escalated}[as] - if !err { - return false - } - return true -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/subscription.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/subscription.go deleted file mode 100644 index 0f1e86f8..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/subscription.go +++ /dev/null @@ -1,41 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - * - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -// Subscription represents an object for notification alerts -type Subscription struct { - Timestamps - ID string `json:"id,omitempty"` - Slug string `json:"slug,omitempty"` - Receiver string `json:"receiver,omitempty"` - Description string `json:"description,omitempty"` - SubscribedCategories []NotificationsCategory `json:"subscribedCategories,omitempty"` - SubscribedLabels []string `json:"subscribedLabels,omitempty"` - Channels []Channel `json:"channels,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (s Subscription) String() string { - out, err := json.Marshal(s) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/timestamps.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/timestamps.go deleted file mode 100644 index fb5a0ee2..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/timestamps.go +++ /dev/null @@ -1,44 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -type Timestamps struct { - Created int64 `json:"created,omitempty" yaml:"created,omitempty"` - Modified int64 `json:"modified,omitempty" yaml:"modified,omitempty"` - Origin int64 `json:"origin,omitempty" yaml:"origin,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (ts *Timestamps) String() string { - out, err := json.Marshal(ts) - if err != nil { - return err.Error() - } - return string(out) -} - -/* - * Compare the Created of two objects to determine given is newer - */ -func (ts *Timestamps) compareTo(i Timestamps) int { - if i.Created > ts.Created { - return 1 - } - return -1 -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission.go deleted file mode 100644 index eb0851af..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission.go +++ /dev/null @@ -1,147 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "reflect" -) - -type Transmission struct { - Timestamps - ID string `json:"id"` - Notification Notification `json:"notification"` - Receiver string `json:"receiver,omitempty"` - Channel Channel `json:"channel,omitempty"` - Status TransmissionStatus `json:"status,omitempty"` - ResendCount int `json:"resendcount"` - Records []TransmissionRecord `json:"records,omitempty"` - isValidated bool -} - -// Marshal returns a JSON encoded byte array representation of the model -func (t Transmission) MarshalJSON() ([]byte, error) { - alias := struct { - Timestamps - ID string `json:"id,omitempty"` - Notification *Notification `json:"notification,omitempty"` - Receiver string `json:"receiver,omitempty"` - Channel *Channel `json:"channel,omitempty"` - Status TransmissionStatus `json:"status,omitempty"` - ResendCount *int `json:"resendcount,omitempty"` - Records []TransmissionRecord `json:"records,omitempty"` - }{ - Timestamps: t.Timestamps, - ID: t.ID, - Notification: &t.Notification, - Receiver: t.Receiver, - Channel: &t.Channel, - Status: t.Status, - ResendCount: &t.ResendCount, - Records: t.Records, - } - - // if we don't use omitempty, then an empty object always has a ResendCount included - // if we do use omitempty, then a ResendCount of 0 is not included in the object when it should be - if reflect.DeepEqual(t, Transmission{}) { - alias.ResendCount = nil - } - // do not marshal empty member objects - if 
reflect.DeepEqual(t.Notification, Notification{}) { - alias.Notification = nil - } - if reflect.DeepEqual(t.Channel, Channel{}) { - alias.Channel = nil - } - - return json.Marshal(alias) -} - -// UnmarshalJSON implements the Unmarshaler interface for the Transmission type -func (t *Transmission) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Timestamps - ID *string `json:"id"` - Notification Notification `json:"notification,omitempty"` - Receiver *string `json:"receiver,omitempty"` - Channel Channel `json:"channel,omitempty"` - Status TransmissionStatus `json:"status,omitempty"` - ResendCount int `json:"resendcount"` - Records []TransmissionRecord `json:"records,omitempty"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - // Nillable fields - if a.ID != nil { - t.ID = *a.ID - } - if a.Receiver != nil { - t.Receiver = *a.Receiver - } - - t.Notification = a.Notification - t.Channel = a.Channel - t.Status = a.Status - t.ResendCount = a.ResendCount - t.Records = a.Records - t.Timestamps = a.Timestamps - - t.isValidated, err = t.Validate() - - return err -} - -// Validate satisfies the Validator interface -func (t Transmission) Validate() (bool, error) { - if !t.isValidated { - - if t.Notification.Slug == "" { - return false, NewErrContractInvalid("Transmission's Notification is blank") - } - if t.Receiver == "" { - return false, NewErrContractInvalid("Transmission's Receiver is blank") - } - if t.Channel.Type == "" { - return false, NewErrContractInvalid("Transmission's Channel is blank") - } - if t.Status == "" { - return false, NewErrContractInvalid("Transmission's Status is blank") - } - if t.ResendCount < 0 { - return false, NewErrContractInvalid("Transmission's ResendCount is blank") - } - - err := validate(t) - if err != nil { - return false, err - } - return true, nil - } - return t.isValidated, nil -} - -// String returns a JSON encoded string representation of the 
model -func (t Transmission) String() string { - out, err := json.Marshal(t) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_record.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_record.go deleted file mode 100644 index cdecc199..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_record.go +++ /dev/null @@ -1,35 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- * - *******************************************************************************/ - -package models - -import ( - "encoding/json" -) - -type TransmissionRecord struct { - Status TransmissionStatus `json:"status,omitempty"` - Response string `json:"response,omitempty"` - Sent int64 `json:"sent,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (t TransmissionRecord) String() string { - out, err := json.Marshal(t) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_status.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_status.go deleted file mode 100644 index d4202072..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/transmission_status.go +++ /dev/null @@ -1,62 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Technologies Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- * - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" - "strings" -) - -type TransmissionStatus string - -const ( - Failed = "FAILED" - Sent = "SENT" - Acknowledged = "ACKNOWLEDGED" - Trxescalated = "TRXESCALATED" -) - -/* - * Unmarshal the enum type - */ -func (as *TransmissionStatus) UnmarshalJSON(data []byte) error { - // Extract the string from data. - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("TransmissionStatus should be a string, got %s", data) - } - - new := TransmissionStatus(strings.ToUpper(s)) - *as = new - return nil -} - -func (as TransmissionStatus) Validate() (bool, error) { - _, found := map[string]TransmissionStatus{"FAILED": Failed, "SENT": Sent, "ACKNOWLEDGED": Acknowledged, "TRXESCALATED": Trxescalated}[string(as)] - if !found { - return false, NewErrContractInvalid(fmt.Sprintf("invalid Transmission Status %q", as)) - } - return true, nil -} - -func IsTransmissionStatus(as string) bool { - _, err := map[string]TransmissionStatus{"FAILED": Failed, "SENT": Sent, "ACKNOWLEDGED": Acknowledged, "TRXESCALATED": Trxescalated}[as] - if !err { - return false - } - return true -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/units.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/units.go deleted file mode 100644 index fdc409fa..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/units.go +++ /dev/null @@ -1,32 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import "encoding/json" - -type Units struct { - Type string `json:"type,omitempty" yaml:"type,omitempty"` - ReadWrite string `json:"readWrite,omitempty" yaml:"readWrite,omitempty"` - DefaultValue string `json:"defaultValue,omitempty" yaml:"defaultValue,omitempty"` -} - -// String returns a JSON encoded string representation of the model -func (u Units) String() string { - out, err := json.Marshal(u) - if err != nil { - return err.Error() - } - return string(out) -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/validator.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/validator.go deleted file mode 100644 index 74b8573d..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/validator.go +++ /dev/null @@ -1,53 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- *******************************************************************************/ - -package models - -import ( - "reflect" -) - -const ( - ValidateTag = "validate" -) - -// Validator provides an interface for struct types to implement validation of their internal state. They can also -// indicate to a caller whether their validation has already been completed. -// -// NOTE: This cannot be applied to struct types that are simply aliased to a primitive. -type Validator interface { - // Validate performs integrity checks on the internal state of the model. It returns a boolean indicating whether - // the validation passed or not, and the associated error if validation was unsuccessful. - Validate() (bool, error) -} - -func validate(t interface{}) error { - val := reflect.ValueOf(t) - typ := reflect.TypeOf(t) - fields := val.NumField() - for f := 0; f < fields; f++ { - field := val.Field(f) - typfield := typ.Field(f) - if field.Type().NumMethod() > 0 && field.CanInterface() && typfield.Tag.Get(ValidateTag) != "-" { - if v, ok := field.Interface().(Validator); ok { - cast := v.(Validator) - _, err := cast.Validate() - if err != nil { - return NewErrContractInvalid(err.Error()) - } - } - } - } - return nil -} diff --git a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/value-descriptor.go b/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/value-descriptor.go deleted file mode 100644 index 0e42854b..00000000 --- a/vendor/github.com/edgexfoundry/go-mod-core-contracts/models/value-descriptor.go +++ /dev/null @@ -1,164 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Dell Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - *******************************************************************************/ - -package models - -import ( - "encoding/json" - "fmt" - "regexp" -) - -// defaultValueDescriptorFormat defines the default formatting value used with creating a ValueDescriptor from a DeviceResource. -const defaultValueDescriptorFormat = "%s" - -/* - * Value Descriptor Struct - */ -type ValueDescriptor struct { - Id string `json:"id,omitempty"` - Created int64 `json:"created,omitempty"` - Description string `json:"description,omitempty"` - Modified int64 `json:"modified,omitempty"` - Origin int64 `json:"origin,omitempty"` - Name string `json:"name,omitempty"` - Min interface{} `json:"min,omitempty"` - Max interface{} `json:"max,omitempty"` - DefaultValue interface{} `json:"defaultValue,omitempty"` - Type string `json:"type,omitempty"` - UomLabel string `json:"uomLabel,omitempty"` - Formatting string `json:"formatting,omitempty"` - Labels []string `json:"labels,omitempty"` - MediaType string `json:"mediaType,omitempty"` - FloatEncoding string `json:"floatEncoding,omitempty"` - isValidated bool // internal member used for validation check -} - -// UnmarshalJSON implements the Unmarshaler interface for the ValueDescriptor type -func (v *ValueDescriptor) UnmarshalJSON(data []byte) error { - var err error - type Alias struct { - Id *string `json:"id"` - Created int64 `json:"created"` - Description *string `json:"description"` - Modified int64 `json:"modified"` - Origin int64 `json:"origin"` - Name *string `json:"name"` - Min *interface{} `json:"min"` - Max *interface{} 
`json:"max"` - DefaultValue *interface{} `json:"defaultValue"` - Type *string `json:"type"` - UomLabel *string `json:"uomLabel"` - Formatting *string `json:"formatting"` - Labels []string `json:"labels"` - MediaType *string `json:"mediaType"` - FloatEncoding *string `json:"floatEncoding"` - } - a := Alias{} - // Error with unmarshaling - if err = json.Unmarshal(data, &a); err != nil { - return err - } - - // Set the fields - if a.Id != nil { - v.Id = *a.Id - } - if a.Description != nil { - v.Description = *a.Description - } - if a.Name != nil { - v.Name = *a.Name - } - if a.Min != nil { - v.Min = *a.Min - } - if a.Max != nil { - v.Max = *a.Max - } - if a.DefaultValue != nil { - v.DefaultValue = *a.DefaultValue - } - if a.Type != nil { - v.Type = *a.Type - } - if a.UomLabel != nil { - v.UomLabel = *a.UomLabel - } - if a.Formatting != nil { - v.Formatting = *a.Formatting - } - if a.MediaType != nil { - v.MediaType = *a.MediaType - } - if a.FloatEncoding != nil { - v.FloatEncoding = *a.FloatEncoding - } - v.Created = a.Created - v.Modified = a.Modified - v.Origin = a.Origin - v.Labels = a.Labels - - v.isValidated, err = v.Validate() - return err -} - -// Validate satisfies the Validator interface -func (v ValueDescriptor) Validate() (bool, error) { - if !v.isValidated { - if v.Formatting != "" { - formatSpecifier := "%(\\d+\\$)?([-#+ 0,(\\<]*)?(\\d+)?(\\.\\d+)?([tT])?([a-zA-Z%])" - match, err := regexp.MatchString(formatSpecifier, v.Formatting) - if err != nil { - return false, NewErrContractInvalid(fmt.Sprintf("error validating format string: %s", v.Formatting)) - } - if !match { - return false, NewErrContractInvalid(fmt.Sprintf("format is not a valid printf format: %s", v.Formatting)) - } - } - if v.Name == "" { - return false, NewErrContractInvalid("name for value descriptor not specified") - } - } - return true, nil -} - -// String returns a JSON encoded string representation of the model -func (a ValueDescriptor) String() string { - out, err := json.Marshal(a) - 
if err != nil { - return err.Error() - } - return string(out) -} - -// From creates a ValueDescriptor based on the information provided in the DeviceResource. -func From(dr DeviceResource) ValueDescriptor { - value := dr.Properties.Value - units := dr.Properties.Units - desc := ValueDescriptor{ - Name: dr.Name, - Min: value.Minimum, - Max: value.Maximum, - Type: value.Type, - UomLabel: units.DefaultValue, - DefaultValue: value.DefaultValue, - Formatting: defaultValueDescriptorFormat, - Description: dr.Description, - FloatEncoding: value.FloatEncoding, - MediaType: value.MediaType, - } - - return desc -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md deleted file mode 100644 index 2bd78667..00000000 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ /dev/null @@ -1,10 +0,0 @@ -# Changelog - -## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) - - -### Bug Fixes - -* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) - -## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 55668887..00000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Tips - -Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). - -Always try to include a test case! If it is not possible or not necessary, -please explain why in the pull request description. 
- -### Releasing - -Commits that would precipitate a SemVer change, as desrcibed in the Conventional -Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) -to create a release candidate pull request. Once submitted, `release-please` -will create a release. - -For tips on how to work with `release-please`, see its documentation. - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6..00000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268..00000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 3e9a6188..00000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# uuid -The uuid package generates and inspects UUIDs based on -[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
- -###### Install -```sh -go get github.com/google/uuid -``` - -###### Documentation -[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d..00000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9a..00000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b404f4be..00000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) //nolint:errcheck - h.Write(data) //nolint:errcheck - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd3407..00000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b0..00000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. 
-// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index b2a0bc87..00000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This removes the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddb..00000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go deleted file mode 100644 index d7fcbf28..00000000 --- a/vendor/github.com/google/uuid/null.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2021 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -var jsonNull = []byte("null") - -// NullUUID represents a UUID that may be null. -// NullUUID implements the SQL driver.Scanner interface so -// it can be used as a scan destination: -// -// var u uuid.NullUUID -// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) -// ... -// if u.Valid { -// // use u.UUID -// } else { -// // NULL value -// } -// -type NullUUID struct { - UUID UUID - Valid bool // Valid is true if UUID is not NULL -} - -// Scan implements the SQL driver.Scanner interface. -func (nu *NullUUID) Scan(value interface{}) error { - if value == nil { - nu.UUID, nu.Valid = Nil, false - return nil - } - - err := nu.UUID.Scan(value) - if err != nil { - nu.Valid = false - return err - } - - nu.Valid = true - return nil -} - -// Value implements the driver Valuer interface. -func (nu NullUUID) Value() (driver.Value, error) { - if !nu.Valid { - return nil, nil - } - // Delegate to UUID Value function - return nu.UUID.Value() -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (nu NullUUID) MarshalBinary() ([]byte, error) { - if nu.Valid { - return nu.UUID[:], nil - } - - return []byte(nil), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (nu *NullUUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(nu.UUID[:], data) - nu.Valid = true - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (nu NullUUID) MarshalText() ([]byte, error) { - if nu.Valid { - return nu.UUID.MarshalText() - } - - return jsonNull, nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (nu *NullUUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - nu.Valid = false - return err - } - nu.UUID = id - nu.Valid = true - return nil -} - -// MarshalJSON implements json.Marshaler. 
-func (nu NullUUID) MarshalJSON() ([]byte, error) { - if nu.Valid { - return json.Marshal(nu.UUID) - } - - return jsonNull, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (nu *NullUUID) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, jsonNull) { - *nu = NullUUID{} - return nil // valid null UUID - } - err := json.Unmarshal(data, &nu.UUID) - nu.Valid = err == nil - return err -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index 2e02ec06..00000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently. -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. 
Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cd..00000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. 
- if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. 
-// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c737..00000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index a56138cc..00000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - "sync" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. 
-) - -const randPoolSize = 16 * 16 - -var ( - rander = rand.Reader // random function - poolEnabled = false - poolMu sync.Mutex - poolPos = randPoolSize // protected with poolMu - pool [randPoolSize]byte // protected with poolMu -) - -type invalidLengthError struct{ len int } - -func (err invalidLengthError) Error() string { - return fmt.Sprintf("invalid UUID length: %d", err.len) -} - -// IsInvalidLengthError is matcher function for custom error invalidLengthError -func IsInvalidLengthError(err error) bool { - _, ok := err.(invalidLengthError) - return ok -} - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if !strings.EqualFold(s[:9], "urn:uuid:") { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(s)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - 
return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(b)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. 
-func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. 
-func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// EnableRandPool enables internal randomness pool used for Random -// (Version 4) UUID generation. The pool contains random bytes read from -// the random number generator on demand in batches. Enabling the pool -// may improve the UUID generation throughput significantly. -// -// Since the pool is stored on the Go heap, this feature may be a bad fit -// for security sensitive applications. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func EnableRandPool() { - poolEnabled = true -} - -// DisableRandPool disables the randomness pool if it was previously -// enabled with EnableRandPool. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func DisableRandPool() { - poolEnabled = false - defer poolMu.Unlock() - poolMu.Lock() - poolPos = randPoolSize -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 46310962..00000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. 
If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 7697802e..00000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewString creates a new random UUID and returns it as a string or panics. -// NewString is equivalent to the expression -// -// uuid.New().String() -func NewString() string { - return Must(NewRandom()).String() -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// Uses the randomness pool if it was enabled with EnableRandPool. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - if !poolEnabled { - return NewRandomFromReader(rander) - } - return newRandomFromPool() -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - -func newRandomFromPool() (UUID, error) { - var uuid UUID - poolMu.Lock() - if poolPos == randPoolSize { - _, err := io.ReadFull(rander, pool[:]) - if err != nil { - poolMu.Unlock() - return Nil, err - } - poolPos = 0 - } - copy(uuid[:], pool[poolPos:(poolPos+16)]) - poolPos += 16 - poolMu.Unlock() - - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index cd3fcd1e..00000000 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f400..00000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla 
WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index 9171c972..00000000 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index 2517a287..00000000 --- a/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Gorilla WebSocket - -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - - ---- - -⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** - ---- - -### Documentation - -* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
- diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index 2efd8355..00000000 --- a/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptrace" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -var errInvalidCompression = errors.New("websocket: invalid compression negotiation") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. -// -// It is safe to call Dialer's methods concurrently. 
-type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // NetDialContext specifies the dial function for creating TCP connections. If - // NetDialContext is nil, NetDial is used. - NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If - // NetDialTLSContext is nil, NetDialContext is used. - // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and - // TLSClientConfig is ignored. - NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake - // is done there and TLSClientConfig is ignored. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then a useful default size is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. 
- // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string - - // EnableCompression specifies if the client should attempt to negotiate - // per message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool - - // Jar specifies the cookie jar. - // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar http.CookieJar -} - -// Dial creates a new client connection by calling DialContext with a background context. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - return d.DialContext(context.Background(), urlStr, requestHeader) -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, - HandshakeTimeout: 45 * time.Second, -} - -// nilDialer is dialer to use when receiver is nil. -var nilDialer = *DefaultDialer - -// DialContext creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). 
-// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// The context will be used in the request and in the Dialer. -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. -func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - if d == nil { - d = &nilDialer - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := url.Parse(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. - return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: http.MethodGet, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - req = req.WithContext(ctx) - - // Set the cookies present in the cookie jar of the dialer - if d.Jar != nil { - for _, cookie := range d.Jar.Cookies(u) { - req.AddCookie(cookie) - } - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. 
- req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - k == "Sec-Websocket-Extensions" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - case k == "Sec-Websocket-Protocol": - req.Header["Sec-WebSocket-Protocol"] = vs - default: - req.Header[k] = vs - } - } - - if d.EnableCompression { - req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} - } - - if d.HandshakeTimeout != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) - defer cancel() - } - - // Get network dial function. 
- var netDial func(network, add string) (net.Conn, error) - - switch u.Scheme { - case "http": - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - case "https": - if d.NetDialTLSContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialTLSContext(ctx, network, addr) - } - } else if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - default: - return nil, nil, errMalformedURL - } - - if netDial == nil { - netDialer := &net.Dialer{} - netDial = func(network, addr string) (net.Conn, error) { - return netDialer.DialContext(ctx, network, addr) - } - } - - // If needed, wrap the dial function to set the connection deadline. - if deadline, ok := ctx.Deadline(); ok { - forwardDial := netDial - netDial = func(network, addr string) (net.Conn, error) { - c, err := forwardDial(network, addr) - if err != nil { - return nil, err - } - err = c.SetDeadline(deadline) - if err != nil { - c.Close() - return nil, err - } - return c, nil - } - } - - // If needed, wrap the dial function to connect through a proxy. 
- if d.Proxy != nil { - proxyURL, err := d.Proxy(req) - if err != nil { - return nil, nil, err - } - if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) - if err != nil { - return nil, nil, err - } - netDial = dialer.Dial - } - } - - hostPort, hostNoPort := hostPortNoPort(u) - trace := httptrace.ContextClientTrace(ctx) - if trace != nil && trace.GetConn != nil { - trace.GetConn(hostPort) - } - - netConn, err := netDial("tcp", hostPort) - if trace != nil && trace.GotConn != nil { - trace.GotConn(httptrace.GotConnInfo{ - Conn: netConn, - }) - } - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if u.Scheme == "https" && d.NetDialTLSContext == nil { - // If NetDialTLSContext is set, assume that the TLS handshake has already been done - - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - - if trace != nil && trace.TLSHandshakeStart != nil { - trace.TLSHandshakeStart() - } - err := doHandshake(ctx, tlsConn, cfg) - if trace != nil && trace.TLSHandshakeDone != nil { - trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) - } - - if err != nil { - return nil, nil, err - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - if trace != nil && trace.GotFirstResponseByte != nil { - if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { - trace.GotFirstResponseByte() - } - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - return nil, nil, err - } - - if d.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - d.Jar.SetCookies(u, rc) - } - } - - if resp.StatusCode != 101 || - !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || - !tokenListContainsValue(resp.Header, "Connection", "upgrade") || - 
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - for _, ext := range parseExtensions(resp.Header) { - if ext[""] != "permessage-deflate" { - continue - } - _, snct := ext["server_no_context_takeover"] - _, cnct := ext["client_no_context_takeover"] - if !snct || !cnct { - return nil, resp, errInvalidCompression - } - conn.newCompressionWriter = compressNoContextTakeover - conn.newDecompressionReader = decompressNoContextTakeover - break - } - - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return cfg.Clone() -} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go deleted file mode 100644 index 813ffb1e..00000000 --- a/vendor/github.com/gorilla/websocket/compression.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "compress/flate" - "errors" - "io" - "strings" - "sync" -) - -const ( - minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 - maxCompressionLevel = flate.BestCompression - defaultCompressionLevel = 1 -) - -var ( - flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool - flateReaderPool = sync.Pool{New: func() interface{} { - return flate.NewReader(nil) - }} -) - -func decompressNoContextTakeover(r io.Reader) io.ReadCloser { - const tail = - // Add four bytes as specified in RFC - "\x00\x00\xff\xff" + - // Add final block to squelch unexpected EOF error from flate reader. - "\x01\x00\x00\xff\xff" - - fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) - return &flateReadWrapper{fr} -} - -func isValidCompressionLevel(level int) bool { - return minCompressionLevel <= level && level <= maxCompressionLevel -} - -func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { - p := &flateWriterPools[level-minCompressionLevel] - tw := &truncWriter{w: w} - fw, _ := p.Get().(*flate.Writer) - if fw == nil { - fw, _ = flate.NewWriter(tw, level) - } else { - fw.Reset(tw) - } - return &flateWriteWrapper{fw: fw, tw: tw, p: p} -} - -// truncWriter is an io.Writer that writes all but the last four bytes of the -// stream to another io.Writer. -type truncWriter struct { - w io.WriteCloser - n int - p [4]byte -} - -func (w *truncWriter) Write(p []byte) (int, error) { - n := 0 - - // fill buffer first for simplicity. 
- if w.n < len(w.p) { - n = copy(w.p[w.n:], p) - p = p[n:] - w.n += n - if len(p) == 0 { - return n, nil - } - } - - m := len(p) - if m > len(w.p) { - m = len(w.p) - } - - if nn, err := w.w.Write(w.p[:m]); err != nil { - return n + nn, err - } - - copy(w.p[:], w.p[m:]) - copy(w.p[len(w.p)-m:], p[len(p)-m:]) - nn, err := w.w.Write(p[:len(p)-m]) - return n + nn, err -} - -type flateWriteWrapper struct { - fw *flate.Writer - tw *truncWriter - p *sync.Pool -} - -func (w *flateWriteWrapper) Write(p []byte) (int, error) { - if w.fw == nil { - return 0, errWriteClosed - } - return w.fw.Write(p) -} - -func (w *flateWriteWrapper) Close() error { - if w.fw == nil { - return errWriteClosed - } - err1 := w.fw.Flush() - w.p.Put(w.fw) - w.fw = nil - if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { - return errors.New("websocket: internal error, unexpected bytes at end of flate stream") - } - err2 := w.tw.w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -type flateReadWrapper struct { - fr io.ReadCloser -} - -func (r *flateReadWrapper) Read(p []byte) (int, error) { - if r.fr == nil { - return 0, io.ErrClosedPipe - } - n, err := r.fr.Read(p) - if err == io.EOF { - // Preemptively place the reader back in the pool. This helps with - // scenarios where the application does not call NextReader() soon after - // this final read. - r.Close() - } - return n, err -} - -func (r *flateReadWrapper) Close() error { - if r.fr == nil { - return io.ErrClosedPipe - } - err := r.fr.Close() - flateReaderPool.Put(r.fr) - r.fr = nil - return err -} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 331eebc8..00000000 --- a/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,1230 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" -) - -const ( - // Frame header byte 0 bits from Section 5.2 of RFC 6455 - finalBit = 1 << 7 - rsv1Bit = 1 << 6 - rsv2Bit = 1 << 5 - rsv3Bit = 1 << 4 - - // Frame header byte 1 bits from Section 5.2 of RFC 6455 - maskBit = 1 << 7 - - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a pong control message. The optional message payload - // is UTF-8 encoded text. 
- PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents a close message. -type CloseError struct { - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) 
- } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. -func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. -func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - 
ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this -// interface. The type of the value stored in a pool is not specified. -type BufferPool interface { - // Get gets a value from the pool or returns nil if the pool is empty. - Get() interface{} - // Put adds a value to the pool. - Put(interface{}) -} - -// writePoolData is the type added to the write buffer pool. This wrapper is -// used to prevent applications from peeking at and depending on the values -// added to the pool. -type writePoolData struct{ buf []byte } - -// The Conn type represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan struct{} // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. - writePool BufferPool - writeBufSize int - writeDeadline time.Time - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection - - writeErrMu sync.Mutex - writeErr error - - enableWriteCompression bool - compressionLevel int - newCompressionWriter func(io.WriteCloser, int) io.WriteCloser - - // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - // bytes remaining in current frame. - // set setReadRemaining to safely update this value and prevent overflow - readRemaining int64 - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - handleClose func(int, string) error - readErrCount int - messageReader *messageReader // the current low-level reader - - readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.ReadCloser -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { - - if br == nil { - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } else if readBufferSize < maxControlFramePayloadSize { - // must be large enough for control frame - readBufferSize = maxControlFramePayloadSize - } - br = bufio.NewReaderSize(conn, readBufferSize) - } - - if writeBufferSize <= 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBufferSize += maxFrameHeaderSize - - if writeBuf == nil && writeBufferPool == nil { - writeBuf = make([]byte, writeBufferSize) - } - - mu := make(chan struct{}, 1) - mu <- struct{}{} - c := &Conn{ - isServer: isServer, - br: br, - conn: conn, - mu: mu, - readFinal: true, - writeBuf: writeBuf, - writePool: writeBufferPool, - writeBufSize: writeBufferSize, - enableWriteCompression: true, - compressionLevel: defaultCompressionLevel, - } - c.SetCloseHandler(nil) - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// setReadRemaining tracks the number of bytes remaining on the connection. If n -// overflows, an ErrReadLimit is returned. -func (c *Conn) setReadRemaining(n int64) error { - if n < 0 { - return ErrReadLimit - } - - c.readRemaining = n - return nil -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting -// for a close message. 
-func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) - c.writeErrMu.Lock() - if c.writeErr == nil { - c.writeErr = err - } - c.writeErrMu.Unlock() - return err -} - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err -} - -func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { - <-c.mu - defer func() { c.mu <- struct{}{} }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - if len(buf1) == 0 { - _, err = c.conn.Write(buf0) - } else { - err = c.writeBufs(buf0, buf1) - } - if err != nil { - return c.writeFatal(err) - } - if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return nil -} - -func (c *Conn) writeBufs(bufs ...[]byte) error { - b := net.Buffers(bufs) - _, err := b.WriteTo(c.conn) - return err -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) 
- buf = append(buf, data...) - maskBytes(key, 0, buf[6:]) - } - - d := 1000 * time.Hour - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- struct{}{} }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - _, err = c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return err -} - -// beginMessage prepares a connection and message writer for a new message. -func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { - // Close previous writer if not already closed by the application. It's - // probably better to return an error in this situation, but we cannot - // change this without breaking existing applications. - if c.writer != nil { - c.writer.Close() - c.writer = nil - } - - if !isControl(messageType) && !isData(messageType) { - return errBadWriteOpCode - } - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - mw.c = c - mw.frameType = messageType - mw.pos = maxFrameHeaderSize - - if c.writeBuf == nil { - wpd, ok := c.writePool.Get().(writePoolData) - if ok { - c.writeBuf = wpd.buf - } else { - c.writeBuf = make([]byte, c.writeBufSize) - } - } - return nil -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -// -// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and -// PongMessage) are supported. 
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return nil, err - } - c.writer = &mw - if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - w := c.newCompressionWriter(c.writer, c.compressionLevel) - mw.compress = true - c.writer = w - } - return c.writer, nil -} - -type messageWriter struct { - c *Conn - compress bool // whether next call to flushFrame should set RSV1 - pos int // end of data in writeBuf. - frameType int // type of the current frame. - err error -} - -func (w *messageWriter) endMessage(err error) error { - if w.err != nil { - return err - } - c := w.c - w.err = err - c.writer = nil - if c.writePool != nil { - c.writePool.Put(writePoolData{buf: c.writeBuf}) - c.writeBuf = nil - } - return err -} - -// flushFrame writes buffered data and extra as a frame to the network. The -// final argument indicates that this is the last frame in the message. -func (w *messageWriter) flushFrame(final bool, extra []byte) error { - c := w.c - length := w.pos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(w.frameType) && - (!final || length > maxControlFramePayloadSize) { - return w.endMessage(errInvalidControlFrame) - } - - b0 := byte(w.frameType) - if final { - b0 |= finalBit - } - if w.compress { - b0 |= rsv1Bit - } - w.compress = false - - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. 
- framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) - if len(extra) > 0 { - return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - if err != nil { - return w.endMessage(err) - } - - if final { - w.endMessage(errWriteClosed) - return nil - } - - // Setup for next frame. - w.pos = maxFrameHeaderSize - w.frameType = continuationFrame - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.pos - if n <= 0 { - if err := w.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.pos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. 
- err := w.flushFrame(false, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if w.err != nil { - return 0, w.err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if w.err != nil { - return 0, w.err - } - for { - if w.pos == len(w.c.writeBuf) { - err = w.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.pos:]) - w.pos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if w.err != nil { - return w.err - } - return w.flushFrame(true, nil) -} - -// WritePreparedMessage writes prepared message into connection. -func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { - frameType, frameData, err := pm.frame(prepareKey{ - isServer: c.isServer, - compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), - compressionLevel: c.compressionLevel, - }) - if err != nil { - return err - } - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - err = c.write(frameType, c.writeDeadline, frameData, nil) - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - return err -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. 
-func (c *Conn) WriteMessage(messageType int, data []byte) error { - - if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { - // Fast path with no allocations and single frame. - - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return err - } - n := copy(c.writeBuf[mw.pos:], data) - mw.pos += n - data = data[n:] - return mw.flushFrame(true, data) - } - - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. - // To aid debugging, collect and report all errors in the first two bytes - // of the header. 
- - var errors []string - - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - frameType := int(p[0] & 0xf) - final := p[0]&finalBit != 0 - rsv1 := p[0]&rsv1Bit != 0 - rsv2 := p[0]&rsv2Bit != 0 - rsv3 := p[0]&rsv3Bit != 0 - mask := p[1]&maskBit != 0 - c.setReadRemaining(int64(p[1] & 0x7f)) - - c.readDecompress = false - if rsv1 { - if c.newDecompressionReader != nil { - c.readDecompress = true - } else { - errors = append(errors, "RSV1 set") - } - } - - if rsv2 { - errors = append(errors, "RSV2 set") - } - - if rsv3 { - errors = append(errors, "RSV3 set") - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - errors = append(errors, "len > 125 for control") - } - if !final { - errors = append(errors, "FIN not set on control") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - errors = append(errors, "data before FIN") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - errors = append(errors, "continuation after FIN") - } - c.readFinal = final - default: - errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) - } - - if mask != c.isServer { - errors = append(errors, "bad MASK") - } - - if len(errors) > 0 { - return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) - } - - // 3. Read and parse frame length as per - // https://tools.ietf.org/html/rfc6455#section-5.2 - // - // The length of the "Payload data", in bytes: if 0-125, that is the payload - // length. - // - If 126, the following 2 bytes interpreted as a 16-bit unsigned - // integer are the payload length. - // - If 127, the following 8 bytes interpreted as - // a 64-bit unsigned integer (the most significant bit MUST be 0) are the - // payload length. Multibyte length quantities are expressed in network byte - // order. 
- - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { - return noFrame, err - } - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - - if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { - return noFrame, err - } - } - - // 4. Handle frame masking. - - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - // Don't allow readLength to overflow in the presence of a large readRemaining - // counter. - if c.readLength < 0 { - return noFrame, ErrReadLimit - } - - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - c.setReadRemaining(0) - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. 
- - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - if err := c.handleClose(closeCode, closeText); err != nil { - return noFrame, err - } - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - data := FormatCloseMessage(CloseProtocolError, message) - if len(data) > maxControlFramePayloadSize { - data = data[:maxControlFramePayloadSize] - } - c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - // Close previous reader, only relevant for decompression. 
- if c.reader != nil { - c.reader.Close() - c.reader = nil - } - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - - if frameType == TextMessage || frameType == BinaryMessage { - c.messageReader = &messageReader{c} - c.reader = c.messageReader - if c.readDecompress { - c.reader = c.newDecompressionReader(c.reader) - } - return frameType, c.reader, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. To help application developers detect - // this error, panic on repeated reads to the failed connection. - c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - rem := c.readRemaining - rem -= int64(n) - c.setReadRemaining(rem) - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -func (r *messageReader) Close() error { - return nil -} - -// ReadMessage is a helper method 
for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = ioutil.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a -// message exceeds the limit, the connection sends a close message to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// CloseHandler returns the current close handler -func (c *Conn) CloseHandler() func(code int, text string) error { - return c.handleClose -} - -// SetCloseHandler sets the handler for close messages received from the peer. -// The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close -// message back to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// close messages as described in the section on Control Messages above. -// -// The connection read methods return a CloseError when a close message is -// received. Most applications should handle close messages as part of their -// normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close message back to -// the peer. 
-func (c *Conn) SetCloseHandler(h func(code int, text string) error) { - if h == nil { - h = func(code int, text string) error { - message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) - return nil - } - } - c.handleClose = h -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING message application data. The default -// ping handler sends a pong to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// ping messages as described in the section on Control Messages above. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG message application data. The default -// pong handler does nothing. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// pong messages as described in the section on Control Messages above. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. 
This can be used to further -// modifications to connection specific flags. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// EnableWriteCompression enables and disables write compression of -// subsequent text and binary messages. This function is a noop if -// compression was not negotiated with the peer. -func (c *Conn) EnableWriteCompression(enable bool) { - c.enableWriteCompression = enable -} - -// SetCompressionLevel sets the flate compression level for subsequent text and -// binary messages. This function is a noop if compression was not negotiated -// with the peer. See the compress/flate package for a description of -// compression levels. -func (c *Conn) SetCompressionLevel(level int) error { - if !isValidCompressionLevel(level) { - return errors.New("websocket: invalid compression level") - } - c.compressionLevel = level - return nil -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -// An empty message is returned for code CloseNoStatusReceived. -func FormatCloseMessage(closeCode int, text string) []byte { - if closeCode == CloseNoStatusReceived { - // Return empty message because it's illegal to send - // CloseNoStatusReceived. Return non-nil value in case application - // checks for nil. - return []byte{} - } - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index 8db0cef9..00000000 --- a/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. 
A server application calls -// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// log.Println(err) -// return -// } -// if err := conn.WriteMessage(messageType, p); err != nil { -// log.Println(err) -// return -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. 
The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received close messages by calling the handler function -// set with the SetCloseHandler method and by returning a *CloseError from the -// NextReader, ReadMessage or the message Read method. The default close -// handler sends a close message to the peer. -// -// Connections handle received ping messages by calling the handler function -// set with the SetPingHandler method. The default ping handler sends a pong -// message to the peer. -// -// Connections handle received pong messages by calling the handler function -// set with the SetPongHandler method. The default pong handler does nothing. -// If an application sends ping messages, then the application should set a -// pong handler to receive the corresponding pong. -// -// The control message handler functions are called from the NextReader, -// ReadMessage and message reader Read methods. The default close and ping -// handlers can block these methods for a short time when the handler writes to -// the connection. -// -// The application must read the connection to process close, ping and pong -// messages sent from the peer. 
If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and -// that no more than one goroutine calls the read methods (NextReader, -// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) -// concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and the Origin host is -// not equal to the Host request header. -// -// The deprecated package-level Upgrade function does not perform origin -// checking. The application is responsible for checking the Origin header -// before calling the Upgrade function. -// -// Buffers -// -// Connections buffer network input and output to reduce the number -// of system calls when reading or writing messages. 
-// -// Write buffers are also used for constructing WebSocket frames. See RFC 6455, -// Section 5 for a discussion of message framing. A WebSocket frame header is -// written to the network each time a write buffer is flushed to the network. -// Decreasing the size of the write buffer can increase the amount of framing -// overhead on the connection. -// -// The buffer sizes in bytes are specified by the ReadBufferSize and -// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default -// size of 4096 when a buffer size field is set to zero. The Upgrader reuses -// buffers created by the HTTP server when a buffer size field is set to zero. -// The HTTP server buffers have a size of 4096 at the time of this writing. -// -// The buffer sizes do not limit the size of a message that can be read or -// written by a connection. -// -// Buffers are held for the lifetime of the connection by default. If the -// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the -// write buffer only when writing a message. -// -// Applications should tune the buffer sizes to balance memory use and -// performance. Increasing the buffer size uses more memory, but can reduce the -// number of system calls to read or write the network. In the case of writing, -// increasing the buffer size can reduce the number of frame headers written to -// the network. -// -// Some guidelines for setting buffer parameters are: -// -// Limit the buffer sizes to the maximum expected message size. Buffers larger -// than the largest message do not provide any benefit. -// -// Depending on the distribution of message sizes, setting the buffer size to -// a value less than the maximum expected message size can greatly reduce memory -// use with a small impact on performance. 
Here's an example: If 99% of the -// messages are smaller than 256 bytes and the maximum message size is 512 -// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls -// than a buffer size of 512 bytes. The memory savings is 50%. -// -// A write buffer pool is useful when the application has a modest number -// writes over a large number of connections. when buffers are pooled, a larger -// buffer size has a reduced impact on total memory use and has the benefit of -// reducing system calls and frame overhead. -// -// Compression EXPERIMENTAL -// -// Per message compression extensions (RFC 7692) are experimentally supported -// by this package in a limited capacity. Setting the EnableCompression option -// to true in Dialer or Upgrader will attempt to negotiate per message deflate -// support. -// -// var upgrader = websocket.Upgrader{ -// EnableCompression: true, -// } -// -// If compression was successfully negotiated with the connection's peer, any -// message received in compressed form will be automatically decompressed. -// All Read methods will return uncompressed bytes. -// -// Per message compression of messages written to a connection can be enabled -// or disabled by calling the corresponding Conn method: -// -// conn.EnableWriteCompression(false) -// -// Currently this package does not support compression with "context takeover". -// This means that messages must be compressed and decompressed in isolation, -// without retaining sliding window or dictionary state across messages. For -// more details refer to RFC 7692. -// -// Use of compression is experimental and may result in decreased performance. -package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go deleted file mode 100644 index c64f8c82..00000000 --- a/vendor/github.com/gorilla/websocket/join.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "io" - "strings" -) - -// JoinMessages concatenates received messages to create a single io.Reader. -// The string term is appended to each message. The returned reader does not -// support concurrent calls to the Read method. -func JoinMessages(c *Conn, term string) io.Reader { - return &joinReader{c: c, term: term} -} - -type joinReader struct { - c *Conn - term string - r io.Reader -} - -func (r *joinReader) Read(p []byte) (int, error) { - if r.r == nil { - var err error - _, r.r, err = r.c.NextReader() - if err != nil { - return 0, err - } - if r.term != "" { - r.r = io.MultiReader(r.r, strings.NewReader(r.term)) - } - } - n, err := r.r.Read(p) - if err == io.EOF { - err = nil - r.r = nil - } - return n, err -} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go deleted file mode 100644 index dc2c1f64..00000000 --- a/vendor/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" - "io" -) - -// WriteJSON writes the JSON encoding of v as a message. -// -// Deprecated: Use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v as a message. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. 
-func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// Deprecated: Use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. -func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - err = json.NewDecoder(r).Decode(v) - if err == io.EOF { - // One value is expected in the message. - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go deleted file mode 100644 index d0742bf2..00000000 --- a/vendor/github.com/gorilla/websocket/mask.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -//go:build !appengine -// +build !appengine - -package websocket - -import "unsafe" - -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -func maskBytes(key [4]byte, pos int, b []byte) int { - // Mask one byte at a time for small buffers. - if len(b) < 2*wordSize { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 - } - - // Mask one byte at a time to word boundary. - if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { - n = wordSize - n - for i := range b[:n] { - b[i] ^= key[pos&3] - pos++ - } - b = b[n:] - } - - // Create aligned word size key. 
- var k [wordSize]byte - for i := range k { - k[i] = key[(pos+i)&3] - } - kw := *(*uintptr)(unsafe.Pointer(&k)) - - // Mask one word at a time. - n := (len(b) / wordSize) * wordSize - for i := 0; i < n; i += wordSize { - *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw - } - - // Mask one byte at a time for remaining bytes. - b = b[n:] - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go deleted file mode 100644 index 36250ca7..00000000 --- a/vendor/github.com/gorilla/websocket/mask_safe.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -//go:build appengine -// +build appengine - -package websocket - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go deleted file mode 100644 index c854225e..00000000 --- a/vendor/github.com/gorilla/websocket/prepared.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "net" - "sync" - "time" -) - -// PreparedMessage caches on the wire representations of a message payload. -// Use PreparedMessage to efficiently send a message payload to multiple -// connections. PreparedMessage is especially useful when compression is used -// because the CPU and memory expensive compression operation can be executed -// once for a given set of compression options. 
-type PreparedMessage struct { - messageType int - data []byte - mu sync.Mutex - frames map[prepareKey]*preparedFrame -} - -// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. -type prepareKey struct { - isServer bool - compress bool - compressionLevel int -} - -// preparedFrame contains data in wire representation. -type preparedFrame struct { - once sync.Once - data []byte -} - -// NewPreparedMessage returns an initialized PreparedMessage. You can then send -// it to connection using WritePreparedMessage method. Valid wire -// representation will be calculated lazily only once for a set of current -// connection options. -func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { - pm := &PreparedMessage{ - messageType: messageType, - frames: make(map[prepareKey]*preparedFrame), - data: data, - } - - // Prepare a plain server frame. - _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) - if err != nil { - return nil, err - } - - // To protect against caller modifying the data argument, remember the data - // copied to the plain server frame. - pm.data = frameData[len(frameData)-len(data):] - return pm, nil -} - -func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { - pm.mu.Lock() - frame, ok := pm.frames[key] - if !ok { - frame = &preparedFrame{} - pm.frames[key] = frame - } - pm.mu.Unlock() - - var err error - frame.once.Do(func() { - // Prepare a frame using a 'fake' connection. - // TODO: Refactor code in conn.go to allow more direct construction of - // the frame. 
- mu := make(chan struct{}, 1) - mu <- struct{}{} - var nc prepareConn - c := &Conn{ - conn: &nc, - mu: mu, - isServer: key.isServer, - compressionLevel: key.compressionLevel, - enableWriteCompression: true, - writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), - } - if key.compress { - c.newCompressionWriter = compressNoContextTakeover - } - err = c.WriteMessage(pm.messageType, pm.data) - frame.data = nc.buf.Bytes() - }) - return pm.messageType, frame.data, err -} - -type prepareConn struct { - buf bytes.Buffer - net.Conn -} - -func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } -func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go deleted file mode 100644 index e0f466b7..00000000 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "encoding/base64" - "errors" - "net" - "net/http" - "net/url" - "strings" -) - -type netDialerFunc func(network, addr string) (net.Conn, error) - -func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { - return fn(network, addr) -} - -func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil - }) -} - -type httpProxyDialer struct { - proxyURL *url.URL - forwardDial func(network, addr string) (net.Conn, error) -} - -func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { - hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.forwardDial(network, hostPort) - if err != nil { - return nil, err - } - - connectHeader := make(http.Header) - if user := hpd.proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - - connectReq := &http.Request{ - Method: http.MethodConnect, - URL: &url.URL{Opaque: addr}, - Host: addr, - Header: connectHeader, - } - - if err := connectReq.Write(conn); err != nil { - conn.Close() - return nil, err - } - - // Read response. It's OK to use and discard buffered reader here becaue - // the remote server does not speak until spoken to. 
- br := bufio.NewReader(conn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - - if resp.StatusCode != 200 { - conn.Close() - f := strings.SplitN(resp.Status, " ", 2) - return nil, errors.New(f[1]) - } - return conn, nil -} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go deleted file mode 100644 index 24d53b38..00000000 --- a/vendor/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "io" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. -type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -// -// It is safe to call Upgrader's methods concurrently. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then buffers allocated by the HTTP server are used. The - // I/O buffer sizes do not limit the size of the messages that can be sent - // or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. 
- WriteBufferPool BufferPool - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is not nil, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. If there's no match, then no protocol is - // negotiated (the Sec-Websocket-Protocol header is not included in the - // handshake response). - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, then a safe default is used: return false if the - // Origin request header is present and the origin host is not equal to - // request Host header. - // - // A CheckOrigin function should carefully validate the request origin to - // prevent cross-site request forgery. - CheckOrigin func(r *http.Request) bool - - // EnableCompression specify if the server should attempt to negotiate per - // message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - w.Header().Set("Sec-Websocket-Version", "13") - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
-func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return equalASCIIFold(u.Host, r.Host) -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie). To specify -// subprotocols supported by the server, set Upgrader.Subprotocols directly. -// -// If the upgrade fails, then Upgrade replies to the client with an HTTP error -// response. 
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - const badHandshake = "websocket: the client is not using the websocket protocol: " - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") - } - - if r.Method != http.MethodGet { - return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") - } - - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - // Negotiate PMCE - var compress bool - if u.EnableCompression { - for _, ext := range parseExtensions(r.Header) { - if ext[""] != "permessage-deflate" { - continue - } - compress = true - break - } - } - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does 
not implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err := h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - netConn.Close() - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - var br *bufio.Reader - if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { - // Reuse hijacked buffered reader as connection reader. - br = brw.Reader - } - - buf := bufioWriterBuffer(netConn, brw.Writer) - - var writeBuf []byte - if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { - // Reuse hijacked write buffer as connection buffer. - writeBuf = buf - } - - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) - c.subprotocol = subprotocol - - if compress { - c.newCompressionWriter = compressNoContextTakeover - c.newDecompressionReader = decompressNoContextTakeover - } - - // Use larger of hijacked buffer and connection write buffer for header. - p := buf - if len(c.writeBuf) > len(p) { - p = c.writeBuf - } - p = p[:0] - - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) - if c.subprotocol != "" { - p = append(p, "Sec-WebSocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) - } - if compress { - p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) 
- } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) - - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) - } - if _, err = netConn.Write(p); err != nil { - netConn.Close() - return nil, err - } - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// Deprecated: Use websocket.Upgrader instead. -// -// Upgrade does not perform origin checking. The application is responsible for -// checking the Origin header before calling Upgrade. An example implementation -// of the same origin policy check is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", http.StatusForbidden) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. 
-func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. -func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} - -// IsWebSocketUpgrade returns true if the client requested upgrade to the -// WebSocket protocol. -func IsWebSocketUpgrade(r *http.Request) bool { - return tokenListContainsValue(r.Header, "Connection", "upgrade") && - tokenListContainsValue(r.Header, "Upgrade", "websocket") -} - -// bufioReaderSize size returns the size of a bufio.Reader. -func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { - // This code assumes that peek on a reset reader returns - // bufio.Reader.buf[:0]. - // TODO: Use bufio.Reader.Size() after Go 1.10 - br.Reset(originalReader) - if p, err := br.Peek(0); err == nil { - return cap(p) - } - return 0 -} - -// writeHook is an io.Writer that records the last slice passed to it vio -// io.Writer.Write. -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} - -// bufioWriterBuffer grabs the buffer from a bufio.Writer. 
-func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { - // This code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. - var wh writeHook - bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() - - bw.Reset(originalWriter) - - return wh.p[:cap(wh.p)] -} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go deleted file mode 100644 index a62b68cc..00000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.HandshakeContext(ctx); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go deleted file mode 100644 index e1b2b44f..00000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go deleted file mode 100644 index 7bf2f66c..00000000 --- a/vendor/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "io" - "net/http" - "strings" - "unicode/utf8" -) - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -// Token octets per RFC 2616. -var isTokenOctet = [256]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -// skipSpace returns a slice of the string s with all leading RFC 2616 linear -// whitespace removed. 
-func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if b := s[i]; b != ' ' && b != '\t' { - break - } - } - return s[i:] -} - -// nextToken returns the leading RFC 2616 token of s and the string following -// the token. -func nextToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if !isTokenOctet[s[i]] { - break - } - } - return s[:i], s[i:] -} - -// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 -// and the string following the token or quoted string. -func nextTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return nextToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} - -// equalASCIIFold returns true if s is equal to t with ASCII case folding as -// defined in RFC 4790. -func equalASCIIFold(s, t string) bool { - for s != "" && t != "" { - sr, size := utf8.DecodeRuneInString(s) - s = s[size:] - tr, size := utf8.DecodeRuneInString(t) - t = t[size:] - if sr == tr { - continue - } - if 'A' <= sr && sr <= 'Z' { - sr = sr + 'a' - 'A' - } - if 'A' <= tr && tr <= 'Z' { - tr = tr + 'a' - 'A' - } - if sr != tr { - return false - } - } - return s == t -} - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains a token equal to value with ASCII case folding. 
-func tokenListContainsValue(header http.Header, name string, value string) bool { -headers: - for _, s := range header[name] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - s = skipSpace(s) - if s != "" && s[0] != ',' { - continue headers - } - if equalASCIIFold(t, value) { - return true - } - if s == "" { - continue headers - } - s = s[1:] - } - } - return false -} - -// parseExtensions parses WebSocket extensions from a header. -func parseExtensions(header http.Header) []map[string]string { - // From RFC 6455: - // - // Sec-WebSocket-Extensions = extension-list - // extension-list = 1#extension - // extension = extension-token *( ";" extension-param ) - // extension-token = registered-token - // registered-token = token - // extension-param = token [ "=" (token | quoted-string) ] - // ;When using the quoted-string syntax variant, the value - // ;after quoted-string unescaping MUST conform to the - // ;'token' ABNF. - - var result []map[string]string -headers: - for _, s := range header["Sec-Websocket-Extensions"] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - ext := map[string]string{"": t} - for { - s = skipSpace(s) - if !strings.HasPrefix(s, ";") { - break - } - var k string - k, s = nextToken(skipSpace(s[1:])) - if k == "" { - continue headers - } - s = skipSpace(s) - var v string - if strings.HasPrefix(s, "=") { - v, s = nextTokenOrQuoted(skipSpace(s[1:])) - s = skipSpace(s) - } - if s != "" && s[0] != ',' && s[0] != ';' { - continue headers - } - ext[k] = v - } - if s != "" && s[0] != ',' { - continue headers - } - result = append(result, ext) - if s == "" { - continue headers - } - s = s[1:] - } - } - return result -} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b..00000000 --- a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ 
-1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. 
-func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. 
-func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. 
-func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. 
- if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. -func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
-func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == 
proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) 
- } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 87d55747..00000000 --- a/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go deleted file mode 100644 index de912e18..00000000 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ /dev/null @@ -1,1017 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Copyright (c) 2015 Klaus Post -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const ( - NoCompression = 0 - BestSpeed = 1 - BestCompression = 9 - DefaultCompression = -1 - - // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman - // entropy encoding. This mode is useful in compressing data that has - // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) - // that lacks an entropy encoder. Compression gains are achieved when - // certain bytes in the input stream occur more frequently than others. - // - // Note that HuffmanOnly produces a compressed output that is - // RFC 1951 compliant. That is, any valid DEFLATE decompressor will - // continue to be able to decompress this output. - HuffmanOnly = -2 - ConstantCompression = HuffmanOnly // compatibility alias. - - logWindowSize = 15 - windowSize = 1 << logWindowSize - windowMask = windowSize - 1 - logMaxOffsetSize = 15 // Standard DEFLATE - minMatchLength = 4 // The smallest match that the compressor looks for - maxMatchLength = 258 // The longest match for the compressor - minOffsetSize = 1 // The shortest offset that makes any sense - - // The maximum number of tokens we will encode at the time. - // Smaller sizes usually creates less optimal blocks. - // Bigger can make context switching slow. - // We use this for levels 7-9, so we make it big. 
- maxFlateBlockTokens = 1 << 15 - maxStoreBlockSize = 65535 - hashBits = 17 // After 17 performance degrades - hashSize = 1 << hashBits - hashMask = (1 << hashBits) - 1 - hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 28 - - skipNever = math.MaxInt32 - - debugDeflate = false -) - -type compressionLevel struct { - good, lazy, nice, chain, fastSkipHashing, level int -} - -// Compression levels have been rebalanced from zlib deflate defaults -// to give a bigger spread in speed and compression. -// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ -var levels = []compressionLevel{ - {}, // 0 - // Level 1-6 uses specialized algorithm - values not used - {0, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 2}, - {0, 0, 0, 0, 0, 3}, - {0, 0, 0, 0, 0, 4}, - {0, 0, 0, 0, 0, 5}, - {0, 0, 0, 0, 0, 6}, - // Levels 7-9 use increasingly more lazy matching - // and increasingly stringent conditions for "good enough". - {8, 12, 16, 24, skipNever, 7}, - {16, 30, 40, 64, skipNever, 8}, - {32, 258, 258, 1024, skipNever, 9}, -} - -// advancedState contains state for the advanced levels, with bigger hash tables, etc. -type advancedState struct { - // deflate state - length int - offset int - maxInsertIndex int - chainHead int - hashOffset int - - ii uint16 // position of last match, intended to overflow to reset. - - // input window: unprocessed data is window[index:windowEnd] - index int - hashMatch [maxMatchLength + minMatchLength]uint32 - - // Input hash chains - // hashHead[hashValue] contains the largest inputIndex with the specified hash value - // If hashHead[hashValue] is within the current window, then - // hashPrev[hashHead[hashValue] & windowMask] contains the previous index - // with the same hash value. 
- hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 -} - -type compressor struct { - compressionLevel - - h *huffmanEncoder - w *huffmanBitWriter - - // compression algorithm - fill func(*compressor, []byte) int // copy data to window - step func(*compressor) // process window - - window []byte - windowEnd int - blockStart int // window index where current tokens start - err error - - // queued output tokens - tokens tokens - fast fastEnc - state *advancedState - - sync bool // requesting flush - byteAvailable bool // if true, still need to process window[index-1]. -} - -func (d *compressor) fillDeflate(b []byte) int { - s := d.state - if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { - // shift the window by windowSize - //copy(d.window[:], d.window[windowSize:2*windowSize]) - *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) - s.index -= windowSize - d.windowEnd -= windowSize - if d.blockStart >= windowSize { - d.blockStart -= windowSize - } else { - d.blockStart = math.MaxInt32 - } - s.hashOffset += windowSize - if s.hashOffset > maxHashOffset { - delta := s.hashOffset - 1 - s.hashOffset -= delta - s.chainHead -= delta - // Iterate over slices instead of arrays to avoid copying - // the entire table onto the stack (Issue #18625). 
- for i, v := range s.hashPrev[:] { - if int(v) > delta { - s.hashPrev[i] = uint32(int(v) - delta) - } else { - s.hashPrev[i] = 0 - } - } - for i, v := range s.hashHead[:] { - if int(v) > delta { - s.hashHead[i] = uint32(int(v) - delta) - } else { - s.hashHead[i] = 0 - } - } - } - } - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - var window []byte - if d.blockStart <= index { - window = d.window[d.blockStart:index] - } - d.blockStart = index - //d.w.writeBlock(tok, eof, window) - d.w.writeBlockDynamic(tok, eof, window, d.sync) - return d.w.err - } - return nil -} - -// writeBlockSkip writes the current block and uses the number of tokens -// to determine if the block should be stored on no matches, or -// only huffman encoded. -func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - if d.blockStart <= index { - window := d.window[d.blockStart:index] - // If we removed less than a 64th of all literals - // we huffman compress the block. - if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window, d.sync) - } else { - // Write a dynamic huffman block. - d.w.writeBlockDynamic(tok, eof, window, d.sync) - } - } else { - d.w.writeBlock(tok, eof, nil) - } - d.blockStart = index - return d.w.err - } - return nil -} - -// fillWindow will fill the current window with the supplied -// dictionary and calculate all hashes. -// This is much faster than doing a full encode. -// Should only be used after a start/reset. -func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only or huffman mode. 
- if d.level <= 0 { - return - } - if d.fast != nil { - // encode the last data, but discard the result - if len(b) > maxMatchOffset { - b = b[len(b)-maxMatchOffset:] - } - d.fast.Encode(&d.tokens, b) - d.tokens.Reset() - return - } - s := d.state - // If we are given too much, cut it. - if len(b) > windowSize { - b = b[len(b)-windowSize:] - } - // Add all to window. - n := copy(d.window[d.windowEnd:], b) - - // Calculate 256 hashes at the time (more L1 cache hits) - loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { - startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - - if dstSize <= 0 { - continue - } - - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - // Update window information. - d.windowEnd += n - s.index = n -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. -// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. 
- tries := d.chain - length = minMatchLength - 1 - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } - offset = 0 - - if d.chain < 100 { - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return - } - - // Minimum gain to accept a match. - cGain := 4 - - // Some like it higher (CSV), some like it lower (JSON) - const baseCost = 3 - // Base is 4 bytes at with an additional cost. - // Matches must be better than this. - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - // Calculate gain. Estimate - newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) - - //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) - if newGain > cGain { - length = n - offset = pos - i - cGain = newGain - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. 
- break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return -} - -func (d *compressor) writeStoredBlock(buf []byte) error { - if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { - return d.w.err - } - d.w.writeBytes(buf) - return d.w.err -} - -// hash4 returns a hash representation of the first 4 bytes -// of the supplied slice. -// The caller must ensure that len(b) >= 4. -func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> (32 - h) -} - -// bulkHash4 will compute hashes using the same -// algorithm as hash4 -func bulkHash4(b []byte, dst []uint32) { - if len(b) < 4 { - return - } - hb := binary.LittleEndian.Uint32(b) - - dst[0] = hash4u(hb, hashBits) - end := len(b) - 4 + 1 - for i := 1; i < end; i++ { - hb = (hb >> 8) | uint32(b[i+3])<<24 - dst[i] = hash4u(hb, hashBits) - } -} - -func (d *compressor) initDeflate() { - d.window = make([]byte, 2*windowSize) - d.byteAvailable = false - d.err = nil - if d.state == nil { - return - } - s := d.state - s.index = 0 - s.hashOffset = 1 - s.length = minMatchLength - 1 - s.offset = 0 - s.chainHead = -1 -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazy() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = debugDeflate - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - if d.windowEnd != s.index && d.chain > 100 { - // Get literal huffman coder. 
- if d.h == nil { - d.h = newHuffmanEncoder(maxFlateBlockTokens) - } - var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { - tmp[v]++ - } - d.h.generate(tmp[:], 15) - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. - if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - hash := hash4(d.window[s.index:]) - ch := s.hashHead[hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - - if prevLength >= minMatchLength && s.length <= prevLength { - // No better match, but check for better match at end... - // - // Skip forward a number of bytes. - // Offset of 2 seems to yield best results. 3 is sometimes better. 
- const checkOff = 2 - - // Check all, except full length - if prevLength < maxMatchLength-checkOff { - prevIndex := s.index - 1 - if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } - end += prevIndex - - // Hash at match end. - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 - - // Extend back... - for i := checkOff - 1; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } else if false { - // Check one further ahead. - // Only rarely better, disabled for now. - prevIndex++ - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength+checkOff { - prevLength = length - prevOffset = prevIndex - ch2 - prevIndex-- - - // Extend back... 
- for i := checkOff; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } - } - } - } - } - } - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - newIndex := s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. 
- s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.ii = 0 - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. - if n := int(s.ii) - d.chain; n > 0 { - n = 1 + int(n>>6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - // Index... 
- if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - s.index++ - } - // Flush last byte - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - } - } else { - s.index++ - d.byteAvailable = true - } - } - } -} - -func (d *compressor) store() { - if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - d.windowEnd = 0 - } -} - -// fillWindow will fill the buffer with data for huffman-only compression. -// The number of bytes copied is returned. -func (d *compressor) fillBlock(b []byte) int { - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -// storeHuff will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeHuff() { - if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { - return - } - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - d.windowEnd = 0 -} - -// storeFast will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeFast() { - // We only compress if we have maxStoreBlockSize. - if d.windowEnd < len(d.window) { - if !d.sync { - return - } - // Handle extremely small sizes. 
- if d.windowEnd < 128 { - if d.windowEnd == 0 { - return - } - if d.windowEnd <= 32 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - } else { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 - d.fast.Reset() - return - } - } - - d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) - // If we made zero matches, store the block as is. - if d.tokens.n == 0 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - // If we removed less than 1/16th, huffman compress the block. - } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } else { - d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 -} - -// write will add input byte to the stream. -// Unless an error occurs all bytes will be consumed. -func (d *compressor) write(b []byte) (n int, err error) { - if d.err != nil { - return 0, d.err - } - n = len(b) - for len(b) > 0 { - if d.windowEnd == len(d.window) || d.sync { - d.step(d) - } - b = b[d.fill(d, b):] - if d.err != nil { - return 0, d.err - } - } - return n, d.err -} - -func (d *compressor) syncFlush() error { - d.sync = true - if d.err != nil { - return d.err - } - d.step(d) - if d.err == nil { - d.w.writeStoredHeader(0, false) - d.w.flush() - d.err = d.w.err - } - d.sync = false - return d.err -} - -func (d *compressor) init(w io.Writer, level int) (err error) { - d.w = newHuffmanBitWriter(w) - - switch { - case level == NoCompression: - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).store - case level == ConstantCompression: - d.w.logNewTablePenalty = 10 - d.window = make([]byte, 32<<10) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeHuff - case level == DefaultCompression: - level = 5 - fallthrough - case level >= 1 && level <= 6: - 
d.w.logNewTablePenalty = 7 - d.fast = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 8 - d.state = &advancedState{} - d.compressionLevel = levels[level] - d.initDeflate() - d.fill = (*compressor).fillDeflate - d.step = (*compressor).deflateLazy - case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: - d.w.logNewTablePenalty = 7 - d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - default: - return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) - } - d.level = level - return nil -} - -// reset the state of the compressor. -func (d *compressor) reset(w io.Writer) { - d.w.reset(w) - d.sync = false - d.err = nil - // We only need to reset a few things for Snappy. - if d.fast != nil { - d.fast.Reset() - d.windowEnd = 0 - d.tokens.Reset() - return - } - switch d.compressionLevel.chain { - case 0: - // level was NoCompression or ConstantCompresssion. - d.windowEnd = 0 - default: - s := d.state - s.chainHead = -1 - for i := range s.hashHead { - s.hashHead[i] = 0 - } - for i := range s.hashPrev { - s.hashPrev[i] = 0 - } - s.hashOffset = 1 - s.index, d.windowEnd = 0, 0 - d.blockStart, d.byteAvailable = 0, false - d.tokens.Reset() - s.length = minMatchLength - 1 - s.offset = 0 - s.ii = 0 - s.maxInsertIndex = 0 - } -} - -func (d *compressor) close() error { - if d.err != nil { - return d.err - } - d.sync = true - d.step(d) - if d.err != nil { - return d.err - } - if d.w.writeStoredHeader(0, true); d.w.err != nil { - return d.w.err - } - d.w.flush() - d.w.reset(nil) - return d.w.err -} - -// NewWriter returns a new Writer compressing data at the given level. 
-// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); -// higher levels typically run slower but compress more. -// Level 0 (NoCompression) does not attempt any compression; it only adds the -// necessary DEFLATE framing. -// Level -1 (DefaultCompression) uses the default compression level. -// Level -2 (ConstantCompression) will use Huffman compression only, giving -// a very fast compression for all types of input, but sacrificing considerable -// compression efficiency. -// -// If level is in the range [-2, 9] then the error returned will be nil. -// Otherwise the error returned will be non-nil. -func NewWriter(w io.Writer, level int) (*Writer, error) { - var dw Writer - if err := dw.d.init(w, level); err != nil { - return nil, err - } - return &dw, nil -} - -// NewWriterDict is like NewWriter but initializes the new -// Writer with a preset dictionary. The returned Writer behaves -// as if the dictionary had been written to it without producing -// any compressed output. The compressed data written to w -// can only be decompressed by a Reader initialized with the -// same dictionary. -func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - zw, err := NewWriter(w, level) - if err != nil { - return nil, err - } - zw.d.fillWindow(dict) - zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. - return zw, err -} - -// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. -const MinCustomWindowSize = 32 - -// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. -const MaxCustomWindowSize = windowSize - -// NewWriterWindow returns a new Writer compressing data with a custom window size. -// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. 
-func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { - if windowSize < MinCustomWindowSize { - return nil, errors.New("flate: requested window size less than MinWindowSize") - } - if windowSize > MaxCustomWindowSize { - return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") - } - var dw Writer - if err := dw.d.init(w, -windowSize); err != nil { - return nil, err - } - return &dw, nil -} - -// A Writer takes data written to it and writes the compressed -// form of that data to an underlying writer (see NewWriter). -type Writer struct { - d compressor - dict []byte -} - -// Write writes data to w, which will eventually write the -// compressed form of data to its underlying writer. -func (w *Writer) Write(data []byte) (n int, err error) { - return w.d.write(data) -} - -// Flush flushes any pending data to the underlying writer. -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. -// Flush does not return until the data has been written. -// Calling Flush when there is no pending data still causes the Writer -// to emit a sync marker of at least 4 bytes. -// If the underlying writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (w *Writer) Flush() error { - // For more about flushing: - // http://www.bolet.org/~pornin/deflate-flush.html - return w.d.syncFlush() -} - -// Close flushes and closes the writer. -func (w *Writer) Close() error { - return w.d.close() -} - -// Reset discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level and dictionary. 
-func (w *Writer) Reset(dst io.Writer) { - if len(w.dict) > 0 { - // w was created with NewWriterDict - w.d.reset(dst) - if dst != nil { - w.d.fillWindow(w.dict) - } - } else { - // w was created with NewWriter - w.d.reset(dst) - } -} - -// ResetDict discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level, but sets a specific dictionary. -func (w *Writer) ResetDict(dst io.Writer, dict []byte) { - w.dict = dict - w.d.reset(dst) - w.d.fillWindow(w.dict) -} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go deleted file mode 100644 index bb36351a..00000000 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// dictDecoder implements the LZ77 sliding dictionary as used in decompression. -// LZ77 decompresses data through sequences of two forms of commands: -// -// - Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. -// -// - Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. 
-// The writeCopy and tryWriteCopy are used to implement this command. -// -// For performance reasons, this implementation performs little to no sanity -// checks about the arguments. As such, the invariants documented for each -// method call must be respected. -type dictDecoder struct { - hist []byte // Sliding window history - - // Invariant: 0 <= rdPos <= wrPos <= len(hist) - wrPos int // Current output position in buffer - rdPos int // Have emitted hist[:rdPos] already - full bool // Has a full window length been written yet? -} - -// init initializes dictDecoder to have a sliding window dictionary of the given -// size. If a preset dict is provided, it will initialize the dictionary with -// the contents of dict. -func (dd *dictDecoder) init(size int, dict []byte) { - *dd = dictDecoder{hist: dd.hist} - - if cap(dd.hist) < size { - dd.hist = make([]byte, size) - } - dd.hist = dd.hist[:size] - - if len(dict) > len(dd.hist) { - dict = dict[len(dict)-len(dd.hist):] - } - dd.wrPos = copy(dd.hist, dict) - if dd.wrPos == len(dd.hist) { - dd.wrPos = 0 - dd.full = true - } - dd.rdPos = dd.wrPos -} - -// histSize reports the total amount of historical data in the dictionary. -func (dd *dictDecoder) histSize() int { - if dd.full { - return len(dd.hist) - } - return dd.wrPos -} - -// availRead reports the number of bytes that can be flushed by readFlush. -func (dd *dictDecoder) availRead() int { - return dd.wrPos - dd.rdPos -} - -// availWrite reports the available amount of output buffer space. -func (dd *dictDecoder) availWrite() int { - return len(dd.hist) - dd.wrPos -} - -// writeSlice returns a slice of the available buffer to write data to. -// -// This invariant will be kept: len(s) <= availWrite() -func (dd *dictDecoder) writeSlice() []byte { - return dd.hist[dd.wrPos:] -} - -// writeMark advances the writer pointer by cnt. 
-// -// This invariant must be kept: 0 <= cnt <= availWrite() -func (dd *dictDecoder) writeMark(cnt int) { - dd.wrPos += cnt -} - -// writeByte writes a single byte to the dictionary. -// -// This invariant must be kept: 0 < availWrite() -func (dd *dictDecoder) writeByte(c byte) { - dd.hist[dd.wrPos] = c - dd.wrPos++ -} - -// writeCopy copies a string at a given (dist, length) to the output. -// This returns the number of bytes copied and may be less than the requested -// length if the available space in the output buffer is too small. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) writeCopy(dist, length int) int { - dstBase := dd.wrPos - dstPos := dstBase - srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } - - // Copy non-overlapping section after destination position. - // - // This section is non-overlapping in that the copy length for this section - // is always less than or equal to the backwards distance. This can occur - // if a distance refers to data that wraps-around in the buffer. - // Thus, a backwards copy is performed here; that is, the exact bytes in - // the source prior to the copy is placed in the destination. - if srcPos < 0 { - srcPos += len(dd.hist) - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) - srcPos = 0 - } - - // Copy possibly overlapping section before destination position. - // - // This section can overlap if the copy length for this section is larger - // than the backwards distance. This is allowed by LZ77 so that repeated - // strings can be succinctly represented using (dist, length) pairs. - // Thus, a forwards copy is performed here; that is, the bytes copied is - // possibly dependent on the resulting bytes in the destination as the copy - // progresses along. 
This is functionally equivalent to the following: - // - // for i := 0; i < endPos-dstPos; i++ { - // dd.hist[dstPos+i] = dd.hist[srcPos+i] - // } - // dstPos = endPos - // - for dstPos < endPos { - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// tryWriteCopy tries to copy a string at a given (distance, length) to the -// output. This specialized version is optimized for short distances. -// -// This method is designed to be inlined for performance reasons. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) tryWriteCopy(dist, length int) int { - dstPos := dd.wrPos - endPos := dstPos + length - if dstPos < dist || endPos > len(dd.hist) { - return 0 - } - dstBase := dstPos - srcPos := dstPos - dist - - // Copy possibly overlapping section before destination position. -loop: - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - if dstPos < endPos { - goto loop // Avoid for-loop so that this function can be inlined - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// readFlush returns a slice of the historical buffer that is ready to be -// emitted to the user. The data returned by readFlush must be fully consumed -// before calling any other dictDecoder methods. -func (dd *dictDecoder) readFlush() []byte { - toRead := dd.hist[dd.rdPos:dd.wrPos] - dd.rdPos = dd.wrPos - if dd.wrPos == len(dd.hist) { - dd.wrPos, dd.rdPos = 0, 0 - dd.full = true - } - return toRead -} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go deleted file mode 100644 index c8124b5c..00000000 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Modified for deflate by Klaus Post (c) 2015. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" -) - -type fastEnc interface { - Encode(dst *tokens, src []byte) - Reset() -} - -func newFastEnc(level int) fastEnc { - switch level { - case 1: - return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} - case 2: - return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} - case 3: - return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} - case 4: - return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} - case 5: - return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} - case 6: - return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} - default: - panic("invalid level specified") - } -} - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. - baseMatchOffset = 1 // The smallest match offset - baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 - maxMatchOffset = 1 << 15 // The largest match offset - - bTableBits = 17 // Bits used in the big tables - bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. - bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. -) - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type tableEntry struct { - offset int32 -} - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. 
-// This is the generic implementation. -type fastGen struct { - hist []byte - cur int32 -} - -func (e *fastGen) addBlock(src []byte) int32 { - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < maxMatchOffset*2 { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -type tableEntryPrev struct { - Cur tableEntry - Prev tableEntry -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). 
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastGen) Reset() { - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. - if e.cur <= bufferReset { - e.cur += maxMatchOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go deleted file mode 100644 index f70594c3..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // The largest offset code. - offsetCodeCount = 30 - - // The special code used to mark the end of a block. - endBlockMarker = 256 - - // The first length code. - lengthCodesStart = 257 - - // The number of codegen codes. - codegenCodeCount = 19 - badCode = 255 - - // maxPredefinedTokens is the maximum number of tokens - // where we check if fixed size is smaller. - maxPredefinedTokens = 250 - - // bufferFlushSize indicates the buffer size - // after which bytes are flushed to the writer. - // Should preferably be a multiple of 6, since - // we accumulate 6 bytes between writes to the buffer. - bufferFlushSize = 246 -) - -// Minimum length code that emits bits. -const lengthExtraBitsMinCode = 8 - -// The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]uint8{ - /* 257 */ 0, 0, 0, - /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, - /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, - /* 280 */ 4, 5, 5, 5, 5, 0, -} - -// The length indicated by length code X - LENGTH_CODES_START. -var lengthBase = [32]uint8{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, - 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, - 64, 80, 96, 112, 128, 160, 192, 224, 255, -} - -// Minimum offset code that emits bits. -const offsetExtraBitsMinCode = 4 - -// offset code word extra bits. 
-var offsetExtraBits = [32]int8{ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, - 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, - /* extended window */ - 14, 14, -} - -var offsetCombined = [32]uint32{} - -func init() { - var offsetBase = [32]uint32{ - /* normal deflate */ - 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, - 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, - 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, - 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, - 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, - 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, - - /* extended window */ - 0x008000, 0x00c000, - } - - for i := range offsetCombined[:] { - // Don't use extended window values... - if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { - continue - } - offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) - } -} - -// The odd order in which the codegen code sizes are written. -var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -type huffmanBitWriter struct { - // writer is the underlying writer. - // Do not use it directly; use the write method, which ensures - // that Write errors are sticky. - writer io.Writer - - // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. - bits uint64 - nbits uint8 - nbytes uint8 - lastHuffMan bool - literalEncoding *huffmanEncoder - tmpLitEncoding *huffmanEncoder - offsetEncoding *huffmanEncoder - codegenEncoding *huffmanEncoder - err error - lastHeader int - // Set between 0 (reused block can be up to 2x the size) - logNewTablePenalty uint - bytes [256 + 8]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 - - // codegen must have an extra space for the final symbol. - codegen [literalCount + offsetCodeCount + 1]uint8 -} - -// Huffman reuse. -// -// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. 
-// -// This is controlled by several variables: -// -// If lastHeader is non-zero the Huffman table can be reused. -// This also indicates that a Huffman table has been generated that can output all -// possible symbols. -// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated -// an EOB with the previous table must be written. -// -// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. -// -// An incoming block estimates the output size of a new table using a 'fresh' by calculating the -// optimal size and adding a penalty in 'logNewTablePenalty'. -// A Huffman table is not optimal, which is why we add a penalty, and generating a new table -// is slower both for compression and decompression. - -func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - return &huffmanBitWriter{ - writer: w, - literalEncoding: newHuffmanEncoder(literalCount), - tmpLitEncoding: newHuffmanEncoder(literalCount), - codegenEncoding: newHuffmanEncoder(codegenCodeCount), - offsetEncoding: newHuffmanEncoder(offsetCodeCount), - } -} - -func (w *huffmanBitWriter) reset(writer io.Writer) { - w.writer = writer - w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.lastHeader = 0 - w.lastHuffMan = false -} - -func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { - a := t.offHist[:offsetCodeCount] - b := w.offsetEncoding.codes - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.extraHist[:literalCount-256] - b = w.literalEncoding.codes[256:literalCount] - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.litHist[:256] - b = w.literalEncoding.codes[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - return true -} - -func (w *huffmanBitWriter) flush() { - if w.err != nil { - w.nbits = 0 - return - } - if w.lastHeader > 0 { - // We owe an EOB - 
w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - n := w.nbytes - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - if w.nbits > 8 { // Avoid underflow - w.nbits -= 8 - } else { - w.nbits = 0 - } - n++ - } - w.bits = 0 - w.write(w.bytes[:n]) - w.nbytes = 0 -} - -func (w *huffmanBitWriter) write(b []byte) { - if w.err != nil { - return - } - _, w.err = w.writer.Write(b) -} - -func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { - w.bits |= uint64(b) << (w.nbits & 63) - w.nbits += nb - if w.nbits >= 48 { - w.writeOutBits() - } -} - -func (w *huffmanBitWriter) writeBytes(bytes []byte) { - if w.err != nil { - return - } - n := w.nbytes - if w.nbits&7 != 0 { - w.err = InternalError("writeBytes with unfinished bits") - return - } - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - w.nbits -= 8 - n++ - } - if n != 0 { - w.write(w.bytes[:n]) - } - w.nbytes = 0 - w.write(bytes) -} - -// RFC 1951 3.2.7 specifies a special run-length encoding for specifying -// the literal and offset lengths arrays (which are concatenated into a single -// array). This method generates that run-length encoding. -// -// The result is written into the codegen array, and the frequencies -// of each code is written into the codegenFreq array. -// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional -// information. Code badCode is an end marker -// -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use -func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { - for i := range w.codegenFreq { - w.codegenFreq[i] = 0 - } - // Note that we are using codegen both as a temporary variable for holding - // a copy of the frequencies, and as the place where we put the result. - // This is fine because the output is always shorter than the input used - // so far. 
- codegen := w.codegen[:] // cache - // Copy the concatenated code sizes to codegen. Put a marker at the end. - cgnl := codegen[:numLiterals] - for i := range cgnl { - cgnl[i] = litEnc.codes[i].len() - } - - cgnl = codegen[numLiterals : numLiterals+numOffsets] - for i := range cgnl { - cgnl[i] = offEnc.codes[i].len() - } - codegen[numLiterals+numOffsets] = badCode - - size := codegen[0] - count := 1 - outIndex := 0 - for inIndex := 1; size != badCode; inIndex++ { - // INVARIANT: We have seen "count" copies of size that have not yet - // had output generated for them. - nextSize := codegen[inIndex] - if nextSize == size { - count++ - continue - } - // We need to generate codegen indicating "count" of size. - if size != 0 { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - count-- - for count >= 3 { - n := 6 - if n > count { - n = count - } - codegen[outIndex] = 16 - outIndex++ - codegen[outIndex] = uint8(n - 3) - outIndex++ - w.codegenFreq[16]++ - count -= n - } - } else { - for count >= 11 { - n := 138 - if n > count { - n = count - } - codegen[outIndex] = 18 - outIndex++ - codegen[outIndex] = uint8(n - 11) - outIndex++ - w.codegenFreq[18]++ - count -= n - } - if count >= 3 { - // count >= 3 && count <= 10 - codegen[outIndex] = 17 - outIndex++ - codegen[outIndex] = uint8(count - 3) - outIndex++ - w.codegenFreq[17]++ - count = 0 - } - } - count-- - for ; count >= 0; count-- { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - } - // Set up invariant for next time through the loop. - size = nextSize - count = 1 - } - // Marker indicating the end of the codegen. 
- codegen[outIndex] = badCode -} - -func (w *huffmanBitWriter) codegens() int { - numCodegens := len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return numCodegens -} - -func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { - numCodegens = len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return 3 + 5 + 5 + 4 + (3 * numCodegens) + - w.codegenEncoding.bitLength(w.codegenFreq[:]) + - int(w.codegenFreq[16])*2 + - int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7, numCodegens -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { - size = litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) - return size -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { - header, numCodegens := w.headerSize() - size = header + - litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) + - extraBits - return size, numCodegens -} - -// extraBitSize will return the number of bits that will be written -// as "extra" bits on matches. -func (w *huffmanBitWriter) extraBitSize() int { - total := 0 - for i, n := range w.literalFreq[257:literalCount] { - total += int(n) * int(lengthExtraBits[i&31]) - } - for i, n := range w.offsetFreq[:offsetCodeCount] { - total += int(n) * int(offsetExtraBits[i&31]) - } - return total -} - -// fixedSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) fixedSize(extraBits int) int { - return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq[:]) + - fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + - extraBits -} - -// storedSize calculates the stored size, including header. 
-// The function returns the size in bits and whether the block -// fits inside a single block. -func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { - if in == nil { - return 0, false - } - if len(in) <= maxStoreBlockSize { - return (len(in) + 5) * 8, true - } - return 0, false -} - -func (w *huffmanBitWriter) writeCode(c hcode) { - // The function does not get inlined if we "& 63" the shift. - w.bits |= c.code64() << (w.nbits & 63) - w.nbits += c.len() - if w.nbits >= 48 { - w.writeOutBits() - } -} - -// writeOutBits will write bits to the buffer. -func (w *huffmanBitWriter) writeOutBits() { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - - // We over-write, but faster... - binary.LittleEndian.PutUint64(w.bytes[n:], bits) - n += 6 - - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - - w.nbytes = n -} - -// Write the header of a dynamic Huffman block to the output stream. -// -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen -func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { - if w.err != nil { - return - } - var firstBits int32 = 4 - if isEof { - firstBits = 5 - } - w.writeBits(firstBits, 3) - w.writeBits(int32(numLiterals-257), 5) - w.writeBits(int32(numOffsets-1), 5) - w.writeBits(int32(numCodegens-4), 4) - - for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) - w.writeBits(int32(value), 3) - } - - i := 0 - for { - var codeWord = uint32(w.codegen[i]) - i++ - if codeWord == badCode { - break - } - w.writeCode(w.codegenEncoding.codes[codeWord]) - - switch codeWord { - case 16: - w.writeBits(int32(w.codegen[i]), 2) - i++ - case 17: - w.writeBits(int32(w.codegen[i]), 3) - i++ - case 18: - w.writeBits(int32(w.codegen[i]), 7) - i++ - } - } -} - -// 
writeStoredHeader will write a stored header. -// If the stored block is only used for EOF, -// it is replaced with a fixed huffman block. -func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. - if length == 0 && isEof { - w.writeFixedHeader(isEof) - // EOB: 7 bits, value: 0 - w.writeBits(0, 7) - w.flush() - return - } - - var flag int32 - if isEof { - flag = 1 - } - w.writeBits(flag, 3) - w.flush() - w.writeBits(int32(length), 16) - w.writeBits(int32(^uint16(length)), 16) -} - -func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // Indicate that we are a fixed Huffman block - var value int32 = 2 - if isEof { - value = 3 - } - w.writeBits(value, 3) -} - -// writeBlock will write a block of tokens with the smallest encoding. -// The original input can be supplied, and if the huffman encoded data -// is larger than the original bytes, the data will be written as a -// stored block. -// If the input is nil, the tokens will always be Huffman encoded. -func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { - if w.err != nil { - return - } - - tokens.AddEOB() - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate() - var extraBits int - storedSize, storable := w.storedSize(input) - if storable { - extraBits = w.extraBitSize() - } - - // Figure out smallest code. - // Fixed Huffman baseline. 
- var literalEncoding = fixedLiteralEncoding - var offsetEncoding = fixedOffsetEncoding - var size = math.MaxInt32 - if tokens.n < maxPredefinedTokens { - size = w.fixedSize(extraBits) - } - - // Dynamic Huffman? - var numCodegens int - - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - if dynamicSize < size { - size = dynamicSize - literalEncoding = w.literalEncoding - offsetEncoding = w.offsetEncoding - } - - // Stored bytes? - if storable && storedSize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Huffman. - if literalEncoding == fixedLiteralEncoding { - w.writeFixedHeader(eof) - } else { - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - } - - // Write the tokens. - w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) -} - -// writeBlockDynamic encodes a block using a dynamic Huffman table. -// This should be used if the symbols used have a disproportionate -// histogram distribution. -// If input is supplied and the compression savings are below 1/16th of the -// input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - sync = sync || eof - if sync { - tokens.AddEOB() - } - - // We cannot reuse pure huffman table, and must mark as EOF. - if (w.lastHuffMan || eof) && w.lastHeader > 0 { - // We will not try to reuse. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } - - // fillReuse enables filling of empty values. - // This will make encodings always reusable without testing. 
- // However, this does not appear to benefit on most cases. - const fillReuse = false - - // Check if we can reuse... - if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - numLiterals, numOffsets := w.indexTokens(tokens, !sync) - extraBits := 0 - ssize, storable := w.storedSize(input) - - const usePrefs = true - if storable || w.lastHeader > 0 { - extraBits = w.extraBitSize() - } - - var size int - - // Check if we should reuse. - if w.lastHeader > 0 { - // Estimate size for using a new table. - // Use the previous header size as the best estimate. - newSize := w.lastHeader + tokens.EstimatedBits() - newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty - - // The estimated size is calculated as an optimal table. - // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits - - // Check if a new table is better. - if newSize < reuseSize { - // Write the EOB we owe. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - size = newSize - w.lastHeader = 0 - } else { - size = reuseSize - } - - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - // Check if we get a reasonable size decrease. 
- if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - - // We want a new block/table - if w.lastHeader == 0 { - if fillReuse && !sync { - w.fillTokens() - numLiterals, numOffsets = maxNumLit, maxNumDist - } else { - w.literalFreq[endBlockMarker] = 1 - } - - w.generate() - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - - var numCodegens int - if fillReuse && !sync { - // Reindex for accurate size... - w.indexTokens(tokens, true) - } - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - // Store predefined, if we don't get a reasonable improvement. - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - - if storable && ssize <= size { - // Store bytes, if we don't get an improvement. - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - if !sync { - w.lastHeader, _ = w.headerSize() - } - w.lastHuffMan = false - } - - if sync { - w.lastHeader = 0 - } - // Write the tokens. 
- w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) -} - -func (w *huffmanBitWriter) fillTokens() { - for i, v := range w.literalFreq[:literalCount] { - if v == 0 { - w.literalFreq[i] = 1 - } - } - for i, v := range w.offsetFreq[:offsetCodeCount] { - if v == 0 { - w.offsetFreq[i] = 1 - } - } -} - -// indexTokens indexes a slice of tokens, and updates -// literalFreq and offsetFreq, and generates literalEncoding -// and offsetEncoding. -// The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - //copy(w.literalFreq[:], t.litHist[:]) - *(*[256]uint16)(w.literalFreq[:]) = t.litHist - //copy(w.literalFreq[256:], t.extraHist[:]) - *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist - w.offsetFreq = t.offHist - - if t.n == 0 { - return - } - if filled { - return maxNumLit, maxNumDist - } - // get the number of literals - numLiterals = len(w.literalFreq) - for w.literalFreq[numLiterals-1] == 0 { - numLiterals-- - } - // get the number of offsets - numOffsets = len(w.offsetFreq) - for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { - numOffsets-- - } - if numOffsets == 0 { - // We haven't found a single match. If we want to go with the dynamic encoding, - // we should count at least one offset to be sure that the offset huffman tree could be encoded. - w.offsetFreq[0] = 1 - numOffsets = 1 - } - return -} - -func (w *huffmanBitWriter) generate() { - w.literalEncoding.generate(w.literalFreq[:literalCount], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeTokens writes a slice of tokens to the output. -// codes for literal and offset encoding must be supplied. -func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { - if w.err != nil { - return - } - if len(tokens) == 0 { - return - } - - // Only last token should be endBlockMarker. 
- var deferEOB bool - if tokens[len(tokens)-1] == endBlockMarker { - tokens = tokens[:len(tokens)-1] - deferEOB = true - } - - // Create slices up to the next power of two to avoid bounds checks. - lits := leCodes[:256] - offs := oeCodes[:32] - lengths := leCodes[lengthCodesStart:] - lengths = lengths[:32] - - // Go 1.16 LOVES having these on stack. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - for _, t := range tokens { - if t < 256 { - //w.writeCode(lits[t.literal()]) - c := lits[t] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - continue - } - - // Write the length - length := t.length() - lengthCode := lengthCode(length) & 31 - if false { - w.writeCode(lengths[lengthCode]) - } else { - // inlined - c := lengths[lengthCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if lengthCode >= lengthExtraBitsMinCode { - extraLengthBits := lengthExtraBits[lengthCode] - //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode]) - bits |= uint64(extraLength) << (nbits & 63) - nbits += extraLengthBits - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = 
w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - // Write the offset - offset := t.offset() - offsetCode := (offset >> 16) & 31 - if false { - w.writeCode(offs[offsetCode]) - } else { - // inlined - c := offs[offsetCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if offsetCode >= offsetExtraBitsMinCode { - offsetComb := offsetCombined[offsetCode] - //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) - nbits += uint8(offsetComb) - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if deferEOB { - w.writeCode(leCodes[endBlockMarker]) - } -} - -// huffOffset is a static offset encoder used for huffman only encoding. -// It can be reused since we will not be encoding offset values. -var huffOffset *huffmanEncoder - -func init() { - w := newHuffmanBitWriter(nil) - w.offsetFreq[0] = 1 - huffOffset = newHuffmanEncoder(offsetCodeCount) - huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeBlockHuff encodes a block of bytes as either -// Huffman encoded literals or uncompressed bytes if the -// results only gains very little from compression. 
-func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - // Clear histogram - for i := range w.literalFreq[:] { - w.literalFreq[i] = 0 - } - if !w.lastHuffMan { - for i := range w.offsetFreq[:] { - w.offsetFreq[i] = 0 - } - } - - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 - - // Add everything as literals - // We have to estimate the header size. - // Assume header is around 70 bytes: - // https://stackoverflow.com/a/25454430 - const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals]) - ssize, storable := w.storedSize(input) - if storable && len(input) > 1024 { - // Quick check for incompressible content. - abs := float64(0) - avg := float64(len(input)) / 256 - max := float64(len(input) * 2) - for _, v := range w.literalFreq[:256] { - diff := float64(v) - avg - abs += diff * diff - if abs > max { - break - } - } - if abs < max { - if debugDeflate { - fmt.Println("stored", abs, "<", max) - } - // No chance we can compress this... - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - if estBits < math.MaxInt32 { - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits - } - estBits += estBits >> w.logNewTablePenalty - } - - // Store bytes, if we don't get a reasonable improvement. 
- if storable && ssize <= estBits { - if debugDeflate { - fmt.Println("stored,", ssize, "<=", estBits) - } - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - if w.lastHeader > 0 { - reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) - - if estBits < reuseSize { - if debugDeflate { - fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") - } - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } else if debugDeflate { - fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) - } - } - - count := 0 - if w.lastHeader == 0 { - // Use the temp encoding, so swap. - w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - numCodegens := w.codegens() - - // Huffman. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHuffMan = true - w.lastHeader, _ = w.headerSize() - if debugDeflate { - count += w.lastHeader - fmt.Println("header:", count/8) - } - } - - encoding := w.literalEncoding.codes[:256] - // Go 1.16 LOVES having these on stack. At least 1.5x the speed. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - if debugDeflate { - count -= int(nbytes)*8 + int(nbits) - } - // Unroll, write 3 codes/loop. - // Fastest number of unrolls. - for len(input) > 3 { - // We must have at least 48 bits free. 
- if nbits >= 8 { - n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - bits >>= (n * 8) & 63 - nbits -= n * 8 - nbytes += n - } - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - a, b := encoding[input[0]], encoding[input[1]] - bits |= a.code64() << (nbits & 63) - bits |= b.code64() << ((nbits + a.len()) & 63) - c := encoding[input[2]] - nbits += b.len() + a.len() - bits |= c.code64() << (nbits & 63) - nbits += c.len() - input = input[3:] - } - - // Remaining... - for _, t := range input { - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= c.code64() << (nbits & 63) - - nbits += c.len() - if debugDeflate { - count += int(c.len()) - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if debugDeflate { - nb := count + int(nbytes)*8 + int(nbits) - fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") - } - // Flush if needed to have space. - if w.nbits >= 48 { - w.writeOutBits() - } - - if eof || sync { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go deleted file mode 100644 index be7b58b4..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "math" - "math/bits" -) - -const ( - maxBitsLimit = 16 - // number of valid literals - literalCount = 286 -) - -// hcode is a huffman code with a bit code and bit length. -type hcode uint32 - -func (h hcode) len() uint8 { - return uint8(h) -} - -func (h hcode) code64() uint64 { - return uint64(h >> 8) -} - -func (h hcode) zero() bool { - return h == 0 -} - -type huffmanEncoder struct { - codes []hcode - bitCount [17]int32 - - // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. - // The largest of these is literalCount, so we allocate for that case. - freqcache [literalCount + 1]literalNode -} - -type literalNode struct { - literal uint16 - freq uint16 -} - -// A levelInfo describes the state of the constructed tree for a given depth. -type levelInfo struct { - // Our level. for better printing - level int32 - - // The frequency of the last node at this level - lastFreq int32 - - // The frequency of the next character to add to this level - nextCharFreq int32 - - // The frequency of the next pair (from level below) to add to this level. - // Only valid if the "needed" value of the next lower level is 0. - nextPairFreq int32 - - // The number of chains remaining to generate for this level before moving - // up to the next level - needed int32 -} - -// set sets the code and length of an hcode. 
-func (h *hcode) set(code uint16, length uint8) { - *h = hcode(length) | (hcode(code) << 8) -} - -func newhcode(code uint16, length uint8) hcode { - return hcode(length) | (hcode(code) << 8) -} - -func reverseBits(number uint16, bitLength byte) uint16 { - return bits.Reverse16(number << ((16 - bitLength) & 15)) -} - -func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } - -func newHuffmanEncoder(size int) *huffmanEncoder { - // Make capacity to next power of two. - c := uint(bits.Len32(uint32(size - 1))) - return &huffmanEncoder{codes: make([]hcode, size, 1<= 3 -// The cases of 0, 1, and 2 literals are handled by special case code. -// -// list An array of the literals with non-zero frequencies -// -// and their associated frequencies. The array is in order of increasing -// frequency, and has as its last element a special element with frequency -// MaxInt32 -// -// maxBits The maximum number of bits that should be used to encode any literal. -// -// Must be less than 16. -// -// return An integer array in which array[i] indicates the number of literals -// -// that should be encoded in i bits. -func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { - if maxBits >= maxBitsLimit { - panic("flate: maxBits too large") - } - n := int32(len(list)) - list = list[0 : n+1] - list[n] = maxNode() - - // The tree can't have greater depth than n - 1, no matter what. This - // saves a little bit of work in some small cases - if maxBits > n-1 { - maxBits = n - 1 - } - - // Create information about each of the levels. - // A bogus "Level 0" whose sole purpose is so that - // level1.prev.needed==0. This makes level1.nextPairFreq - // be a legitimate value that never gets chosen. - var levels [maxBitsLimit]levelInfo - // leafCounts[i] counts the number of literals at the left - // of ancestors of the rightmost node at level i. - // leafCounts[i][j] is the number of literals at the left - // of the level j ancestor. 
- var leafCounts [maxBitsLimit][maxBitsLimit]int32 - - // Descending to only have 1 bounds check. - l2f := int32(list[2].freq) - l1f := int32(list[1].freq) - l0f := int32(list[0].freq) + int32(list[1].freq) - - for level := int32(1); level <= maxBits; level++ { - // For every level, the first two items are the first two characters. - // We initialize the levels as if we had already figured this out. - levels[level] = levelInfo{ - level: level, - lastFreq: l1f, - nextCharFreq: l2f, - nextPairFreq: l0f, - } - leafCounts[level][level] = 2 - if level == 1 { - levels[level].nextPairFreq = math.MaxInt32 - } - } - - // We need a total of 2*n - 2 items at top level and have already generated 2. - levels[maxBits].needed = 2*n - 4 - - level := uint32(maxBits) - for level < 16 { - l := &levels[level] - if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { - // We've run out of both leafs and pairs. - // End all calculations for this level. - // To make sure we never come back to this level or any lower level, - // set nextPairFreq impossibly large. - l.needed = 0 - levels[level+1].nextPairFreq = math.MaxInt32 - level++ - continue - } - - prevFreq := l.lastFreq - if l.nextCharFreq < l.nextPairFreq { - // The next item on this row is a leaf node. - n := leafCounts[level][level] + 1 - l.lastFreq = l.nextCharFreq - // Lower leafCounts are the same of the previous node. - leafCounts[level][level] = n - e := list[n] - if e.literal < math.MaxUint16 { - l.nextCharFreq = int32(e.freq) - } else { - l.nextCharFreq = math.MaxInt32 - } - } else { - // The next item on this row is a pair from the previous row. - // nextPairFreq isn't valid until we generate two - // more values in the level below - l.lastFreq = l.nextPairFreq - // Take leaf counts from the lower level, except counts[level] remains the same. 
- if true { - save := leafCounts[level][level] - leafCounts[level] = leafCounts[level-1] - leafCounts[level][level] = save - } else { - copy(leafCounts[level][:level], leafCounts[level-1][:level]) - } - levels[l.level-1].needed = 2 - } - - if l.needed--; l.needed == 0 { - // We've done everything we need to do for this level. - // Continue calculating one level up. Fill in nextPairFreq - // of that level with the sum of the two nodes we've just calculated on - // this level. - if l.level == maxBits { - // All done! - break - } - levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq - level++ - } else { - // If we stole from below, move down temporarily to replenish it. - for levels[level-1].needed > 0 { - level-- - } - } - } - - // Somethings is wrong if at the end, the top level is null or hasn't used - // all of the leaves. - if leafCounts[maxBits][maxBits] != n { - panic("leafCounts[maxBits][maxBits] != n") - } - - bitCount := h.bitCount[:maxBits+1] - bits := 1 - counts := &leafCounts[maxBits] - for level := maxBits; level > 0; level-- { - // chain.leafCount gives the number of literals requiring at least "bits" - // bits to encode. - bitCount[bits] = counts[level] - counts[level-1] - bits++ - } - return bitCount -} - -// Look at the leaves and assign them a bit count and an encoding as specified -// in RFC 1951 3.2.2 -func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { - code := uint16(0) - for n, bits := range bitCount { - code <<= 1 - if n == 0 || bits == 0 { - continue - } - // The literals list[len(list)-bits] .. list[len(list)-bits] - // are encoded using "bits" bits, and get the values - // code, code + 1, .... The code values are - // assigned in literal order (not frequency order). 
- chunk := list[len(list)-int(bits):] - - sortByLiteral(chunk) - for _, node := range chunk { - h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) - code++ - } - list = list[0 : len(list)-int(bits)] - } -} - -// Update this Huffman Code object to be the minimum code for the specified frequency count. -// -// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. -// maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - list := h.freqcache[:len(freq)+1] - codes := h.codes[:len(freq)] - // Number of non-zero literals - count := 0 - // Set list to be the set of all non-zero literals and their frequencies - for i, f := range freq { - if f != 0 { - list[count] = literalNode{uint16(i), f} - count++ - } else { - codes[i] = 0 - } - } - list[count] = literalNode{} - - list = list[:count] - if count <= 2 { - // Handle the small cases here, because they are awkward for the general case code. With - // two or fewer literals, everything has bit length 1. - for i, node := range list { - // "list" is in order of increasing literal value. - h.codes[node.literal].set(uint16(i), 1) - } - return - } - sortByFreq(list) - - // Get the number of literals for each bit count - bitCount := h.bitCounts(list, maxBits) - // And do the assignment - h.assignEncodingAndSize(bitCount, list) -} - -// atLeastOne clamps the result between 1 and 15. -func atLeastOne(v float32) float32 { - if v < 1 { - return 1 - } - if v > 15 { - return 15 - } - return v -} - -func histogram(b []byte, h []uint16) { - if true && len(b) >= 8<<10 { - // Split for bigger inputs - histogramSplit(b, h) - } else { - h = h[:256] - for _, t := range b { - h[t]++ - } - } -} - -func histogramSplit(b []byte, h []uint16) { - // Tested, and slightly faster than 2-way. - // Writing to separate arrays and combining is also slightly slower. 
- h = h[:256] - for len(b)&3 != 0 { - h[b[0]]++ - b = b[1:] - } - n := len(b) / 4 - x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] - y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] - for i, t := range x { - v0 := &h[t] - v1 := &h[y[i]] - v3 := &h[w[i]] - v2 := &h[z[i]] - *v0++ - *v1++ - *v2++ - *v3++ - } -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go deleted file mode 100644 index 6c05ba8c..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. -func sortByFreq(data []literalNode) { - n := len(data) - quickSortByFreq(data, 0, n, maxDepth(n)) -} - -func quickSortByFreq(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivotByFreq(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). 
- if mlo-a < b-mhi { - quickSortByFreq(data, a, mlo, maxDepth) - a = mhi // i.e., quickSortByFreq(data, mhi, b) - } else { - quickSortByFreq(data, mhi, b, maxDepth) - b = mlo // i.e., quickSortByFreq(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSortByFreq(data, a, b) - } -} - -func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. - s := (hi - lo) / 8 - medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) - medianOfThreeSortByFreq(data, m, m-s, m+s) - medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThreeSortByFreq(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { - } - b := a - for { - for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot - } - for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). 
- // Let's be a bit more conservative, and set border to 5. - protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot - } - for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSortByFreq(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// quickSortByFreq, loosely following Bentley and McIlroy, -// 
``Engineering a Sort Function,'' SP&E November 1993. - -// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go deleted file mode 100644 index 93f1aea1..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. -func sortByLiteral(data []literalNode) { - n := len(data) - quickSort(data, 0, n, maxDepth(n)) -} - -func quickSort(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivot(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). 
- if mlo-a < b-mhi { - quickSort(data, a, mlo, maxDepth) - a = mhi // i.e., quickSort(data, mhi, b) - } else { - quickSort(data, mhi, b, maxDepth) - b = mlo // i.e., quickSort(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].literal < data[i-6].literal { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSort(data, a, b) - } -} -func heapSort(data []literalNode, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDown(data, lo, i, first) - } -} - -// siftDown implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDown(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && data[first+child].literal < data[first+child+1].literal { - child++ - } - if data[first+root].literal > data[first+child].literal { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. 
- s := (hi - lo) / 8 - medianOfThree(data, lo, lo+s, lo+2*s) - medianOfThree(data, m, m-s, m+s) - medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThree(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && data[a].literal < data[pivot].literal; a++ { - } - b := a - for { - for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot - } - for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. - protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].literal > data[pivot].literal { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot - } - for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - 
data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSort(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && data[j].literal < data[j-1].literal; j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// maxDepth returns a threshold at which quicksort should switch -// to heapsort. It returns 2*ceil(lg(n+1)). -func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 -} - -// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThree(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].literal < data[m1].literal { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go deleted file mode 100644 index 414c0bea..00000000 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ /dev/null @@ -1,793 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package flate implements the DEFLATE compressed data format, described in -// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file -// formats. 
-package flate - -import ( - "bufio" - "compress/flate" - "fmt" - "io" - "math/bits" - "sync" -) - -const ( - maxCodeLen = 16 // max length of Huffman code - maxCodeLenMask = 15 // mask for max length of Huffman code - // The next three numbers come from the RFC section 3.2.7, with the - // additional proviso in section 3.2.5 which implies that distance codes - // 30 and 31 should never occur in compressed data. - maxNumLit = 286 - maxNumDist = 30 - numCodes = 19 // number of codes in Huffman meta-code - - debugDecode = false -) - -// Value of length - 3 and extra bits. -type lengthExtra struct { - length, extra uint8 -} - -var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// Initialize the fixedHuffmanDecoder only once upon first use. 
-var fixedOnce sync.Once -var fixedHuffmanDecoder huffmanDecoder - -// A CorruptInputError reports the presence of corrupt input at a given offset. -type CorruptInputError = flate.CorruptInputError - -// An InternalError reports an error in the flate code itself. -type InternalError string - -func (e InternalError) Error() string { return "flate: internal error: " + string(e) } - -// A ReadError reports an error encountered while reading input. -// -// Deprecated: No longer returned. -type ReadError = flate.ReadError - -// A WriteError reports an error encountered while writing output. -// -// Deprecated: No longer returned. -type WriteError = flate.WriteError - -// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to -// to switch to a new underlying Reader. This permits reusing a ReadCloser -// instead of allocating a new one. -type Resetter interface { - // Reset discards any buffered data and resets the Resetter as if it was - // newly initialized with the given reader. - Reset(r io.Reader, dict []byte) error -} - -// The data structure for decoding Huffman tables is based on that of -// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), -// For codes smaller than the table width, there are multiple entries -// (each combination of trailing bits has the same value). For codes -// larger than the table width, the table contains a link to an overflow -// table. The width of each entry in the link table is the maximum code -// size minus the chunk width. -// -// Note that you can do a lookup in the table even without all bits -// filled. Since the extra bits are zero, and the DEFLATE Huffman codes -// have the property that shorter codes come before longer ones, the -// bit length estimate in the result is a lower bound on the actual -// number of bits. 
-// -// See the following: -// http://www.gzip.org/algorithm.txt - -// chunk & 15 is number of bits -// chunk >> 4 is value, including table link - -const ( - huffmanChunkBits = 9 - huffmanNumChunks = 1 << huffmanChunkBits - huffmanCountMask = 15 - huffmanValueShift = 4 -) - -type huffmanDecoder struct { - maxRead int // the maximum number of bits we can read and not overread - chunks *[huffmanNumChunks]uint16 // chunks as described above - links [][]uint16 // overflow links - linkMask uint32 // mask the width of the link table -} - -// Initialize Huffman decoding tables from array of code lengths. -// Following this function, h is guaranteed to be initialized into a complete -// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a -// degenerate case where the tree has only a single symbol with length 1. Empty -// trees are permitted. -func (h *huffmanDecoder) init(lengths []int) bool { - // Sanity enables additional runtime tests during Huffman - // table construction. It's intended to be used during - // development to supplement the currently ad-hoc unit tests. - const sanity = false - - if h.chunks == nil { - h.chunks = &[huffmanNumChunks]uint16{} - } - if h.maxRead != 0 { - *h = huffmanDecoder{chunks: h.chunks, links: h.links} - } - - // Count number of codes of each length, - // compute maxRead and max length. - var count [maxCodeLen]int - var min, max int - for _, n := range lengths { - if n == 0 { - continue - } - if min == 0 || n < min { - min = n - } - if n > max { - max = n - } - count[n&maxCodeLenMask]++ - } - - // Empty tree. The decompressor.huffSym function will fail later if the tree - // is used. Technically, an empty tree is only valid for the HDIST tree and - // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree - // is guaranteed to fail since it will attempt to use the tree to decode the - // codes for the HLIT and HDIST trees. 
Similarly, an empty HLIT tree is - // guaranteed to fail later since the compressed data section must be - // composed of at least one symbol (the end-of-block marker). - if max == 0 { - return true - } - - code := 0 - var nextcode [maxCodeLen]int - for i := min; i <= max; i++ { - code <<= 1 - nextcode[i&maxCodeLenMask] = code - code += count[i&maxCodeLenMask] - } - - // Check that the coding is complete (i.e., that we've - // assigned all 2-to-the-max possible bit sequences). - // Exception: To be compatible with zlib, we also need to - // accept degenerate single-code codings. See also - // TestDegenerateHuffmanCoding. - if code != 1< huffmanChunkBits { - numLinks := 1 << (uint(max) - huffmanChunkBits) - h.linkMask = uint32(numLinks - 1) - - // create link tables - link := nextcode[huffmanChunkBits+1] >> 1 - if cap(h.links) < huffmanNumChunks-link { - h.links = make([][]uint16, huffmanNumChunks-link) - } else { - h.links = h.links[:huffmanNumChunks-link] - } - for j := uint(link); j < huffmanNumChunks; j++ { - reverse := int(bits.Reverse16(uint16(j))) - reverse >>= uint(16 - huffmanChunkBits) - off := j - uint(link) - if sanity && h.chunks[reverse] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[reverse] = uint16(off<>= uint(16 - n) - if n <= huffmanChunkBits { - for off := reverse; off < len(h.chunks); off += 1 << uint(n) { - // We should never need to overwrite - // an existing chunk. Also, 0 is - // never a valid chunk, because the - // lower 4 "count" bits should be - // between 1 and 15. - if sanity && h.chunks[off] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[off] = chunk - } - } else { - j := reverse & (huffmanNumChunks - 1) - if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { - // Longer codes should have been - // associated with a link table above. 
- panic("impossible: not an indirect chunk") - } - value := h.chunks[j] >> huffmanValueShift - linktab := h.links[value] - reverse >>= huffmanChunkBits - for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { - if sanity && linktab[off] != 0 { - panic("impossible: overwriting existing chunk") - } - linktab[off] = chunk - } - } - } - - if sanity { - // Above we've sanity checked that we never overwrote - // an existing entry. Here we additionally check that - // we filled the tables completely. - for i, chunk := range h.chunks { - if chunk == 0 { - // As an exception, in the degenerate - // single-code case, we allow odd - // chunks to be missing. - if code == 1 && i%2 == 1 { - continue - } - panic("impossible: missing chunk") - } - } - for _, linktab := range h.links { - for _, chunk := range linktab { - if chunk == 0 { - panic("impossible: missing chunk") - } - } - } - } - - return true -} - -// The actual read interface needed by NewReader. -// If the passed in io.Reader does not also have ReadByte, -// the NewReader will introduce its own buffering. -type Reader interface { - io.Reader - io.ByteReader -} - -// Decompress state. -type decompressor struct { - // Input source. - r Reader - roffset int64 - - // Huffman decoders for literal/length, distance. - h1, h2 huffmanDecoder - - // Length arrays used to define Huffman codes. - bits *[maxNumLit + maxNumDist]int - codebits *[numCodes]int - - // Output history, buffer. - dict dictDecoder - - // Next step in the decompression, - // and decompression state. - step func(*decompressor) - stepState int - err error - toRead []byte - hl, hd *huffmanDecoder - copyLen int - copyDist int - - // Temporary buffer (avoids repeated allocation). - buf [4]byte - - // Input bits, in top of b. 
- b uint32 - - nb uint - final bool -} - -func (f *decompressor) nextBlock() { - for f.nb < 1+2 { - if f.err = f.moreBits(); f.err != nil { - return - } - } - f.final = f.b&1 == 1 - f.b >>= 1 - typ := f.b & 3 - f.b >>= 2 - f.nb -= 1 + 2 - switch typ { - case 0: - f.dataBlock() - if debugDecode { - fmt.Println("stored block") - } - case 1: - // compressed, fixed Huffman tables - f.hl = &fixedHuffmanDecoder - f.hd = nil - f.huffmanBlockDecoder()() - if debugDecode { - fmt.Println("predefinied huffman block") - } - case 2: - // compressed, dynamic Huffman tables - if f.err = f.readHuffman(); f.err != nil { - break - } - f.hl = &f.h1 - f.hd = &f.h2 - f.huffmanBlockDecoder()() - if debugDecode { - fmt.Println("dynamic huffman block") - } - default: - // 3 is reserved. - if debugDecode { - fmt.Println("reserved data block encountered") - } - f.err = CorruptInputError(f.roffset) - } -} - -func (f *decompressor) Read(b []byte) (int, error) { - for { - if len(f.toRead) > 0 { - n := copy(b, f.toRead) - f.toRead = f.toRead[n:] - if len(f.toRead) == 0 { - return n, f.err - } - return n, nil - } - if f.err != nil { - return 0, f.err - } - f.step(f) - if f.err != nil && len(f.toRead) == 0 { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - } - } -} - -// Support the io.WriteTo interface for io.Copy and friends. 
-func (f *decompressor) WriteTo(w io.Writer) (int64, error) { - total := int64(0) - flushed := false - for { - if len(f.toRead) > 0 { - n, err := w.Write(f.toRead) - total += int64(n) - if err != nil { - f.err = err - return total, err - } - if n != len(f.toRead) { - return total, io.ErrShortWrite - } - f.toRead = f.toRead[:0] - } - if f.err != nil && flushed { - if f.err == io.EOF { - return total, nil - } - return total, f.err - } - if f.err == nil { - f.step(f) - } - if len(f.toRead) == 0 && f.err != nil && !flushed { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - flushed = true - } - } -} - -func (f *decompressor) Close() error { - if f.err == io.EOF { - return nil - } - return f.err -} - -// RFC 1951 section 3.2.7. -// Compression with dynamic Huffman codes - -var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -func (f *decompressor) readHuffman() error { - // HLIT[5], HDIST[5], HCLEN[4]. - for f.nb < 5+5+4 { - if err := f.moreBits(); err != nil { - return err - } - } - nlit := int(f.b&0x1F) + 257 - if nlit > maxNumLit { - if debugDecode { - fmt.Println("nlit > maxNumLit", nlit) - } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - ndist := int(f.b&0x1F) + 1 - if ndist > maxNumDist { - if debugDecode { - fmt.Println("ndist > maxNumDist", ndist) - } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - nclen := int(f.b&0xF) + 4 - // numCodes is 19, so nclen is always valid. - f.b >>= 4 - f.nb -= 5 + 5 + 4 - - // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. 
- for i := 0; i < nclen; i++ { - for f.nb < 3 { - if err := f.moreBits(); err != nil { - return err - } - } - f.codebits[codeOrder[i]] = int(f.b & 0x7) - f.b >>= 3 - f.nb -= 3 - } - for i := nclen; i < len(codeOrder); i++ { - f.codebits[codeOrder[i]] = 0 - } - if !f.h1.init(f.codebits[0:]) { - if debugDecode { - fmt.Println("init codebits failed") - } - return CorruptInputError(f.roffset) - } - - // HLIT + 257 code lengths, HDIST + 1 code lengths, - // using the code length Huffman code. - for i, n := 0, nlit+ndist; i < n; { - x, err := f.huffSym(&f.h1) - if err != nil { - return err - } - if x < 16 { - // Actual length. - f.bits[i] = x - i++ - continue - } - // Repeat previous length or zero. - var rep int - var nb uint - var b int - switch x { - default: - return InternalError("unexpected length code") - case 16: - rep = 3 - nb = 2 - if i == 0 { - if debugDecode { - fmt.Println("i==0") - } - return CorruptInputError(f.roffset) - } - b = f.bits[i-1] - case 17: - rep = 3 - nb = 3 - b = 0 - case 18: - rep = 11 - nb = 7 - b = 0 - } - for f.nb < nb { - if err := f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits:", err) - } - return err - } - } - rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1)) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - if i+rep > n { - if debugDecode { - fmt.Println("i+rep > n", i, rep, n) - } - return CorruptInputError(f.roffset) - } - for j := 0; j < rep; j++ { - f.bits[i] = b - i++ - } - } - - if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { - if debugDecode { - fmt.Println("init2 failed") - } - return CorruptInputError(f.roffset) - } - - // As an optimization, we can initialize the maxRead bits to read at a time - // for the HLIT tree to the length of the EOB marker since we know that - // every block must terminate with one. This preserves the property that - // we never read any extra bytes after the end of the DEFLATE stream. 
- if f.h1.maxRead < f.bits[endBlockMarker] { - f.h1.maxRead = f.bits[endBlockMarker] - } - if !f.final { - // If not the final block, the smallest block possible is - // a predefined table, BTYPE=01, with a single EOB marker. - // This will take up 3 + 7 bits. - f.h1.maxRead += 10 - } - - return nil -} - -// Copy a single uncompressed data block from input to output. -func (f *decompressor) dataBlock() { - // Uncompressed. - // Discard current half-byte. - left := (f.nb) & 7 - f.nb -= left - f.b >>= left - - offBytes := f.nb >> 3 - // Unfilled values will be overwritten. - f.buf[0] = uint8(f.b) - f.buf[1] = uint8(f.b >> 8) - f.buf[2] = uint8(f.b >> 16) - f.buf[3] = uint8(f.b >> 24) - - f.roffset += int64(offBytes) - f.nb, f.b = 0, 0 - - // Length then ones-complement of length. - nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) - f.roffset += int64(nr) - if err != nil { - f.err = noEOF(err) - return - } - n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 - nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 - if nn != ^n { - if debugDecode { - ncomp := ^n - fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) - } - f.err = CorruptInputError(f.roffset) - return - } - - if n == 0 { - f.toRead = f.dict.readFlush() - f.finishBlock() - return - } - - f.copyLen = int(n) - f.copyData() -} - -// copyData copies f.copyLen bytes from the underlying reader into f.hist. -// It pauses for reads when f.hist is full. 
-func (f *decompressor) copyData() { - buf := f.dict.writeSlice() - if len(buf) > f.copyLen { - buf = buf[:f.copyLen] - } - - cnt, err := io.ReadFull(f.r, buf) - f.roffset += int64(cnt) - f.copyLen -= cnt - f.dict.writeMark(cnt) - if err != nil { - f.err = noEOF(err) - return - } - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).copyData - return - } - f.finishBlock() -} - -func (f *decompressor) finishBlock() { - if f.final { - if f.dict.availRead() > 0 { - f.toRead = f.dict.readFlush() - } - f.err = io.EOF - } - f.step = (*decompressor).nextBlock -} - -// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. -func noEOF(e error) error { - if e == io.EOF { - return io.ErrUnexpectedEOF - } - return e -} - -func (f *decompressor) moreBits() error { - c, err := f.r.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << (f.nb & regSizeMaskUint32) - f.nb += 8 - return nil -} - -// Read the next Huffman-encoded symbol from f according to h. -func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(h.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - return 0, noEOF(err) - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := h.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return 0, f.err - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - return int(chunk >> huffmanValueShift), nil - } - } -} - -func makeReader(r io.Reader) Reader { - if rr, ok := r.(Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -func fixedHuffmanDecoderInit() { - fixedOnce.Do(func() { - // These come from the RFC section 3.2.6. - var bits [288]int - for i := 0; i < 144; i++ { - bits[i] = 8 - } - for i := 144; i < 256; i++ { - bits[i] = 9 - } - for i := 256; i < 280; i++ { - bits[i] = 7 - } - for i := 280; i < 288; i++ { - bits[i] = 8 - } - fixedHuffmanDecoder.init(bits[:]) - }) -} - -func (f *decompressor) Reset(r io.Reader, dict []byte) error { - *f = decompressor{ - r: makeReader(r), - bits: f.bits, - codebits: f.codebits, - h1: f.h1, - h2: f.h2, - dict: f.dict, - step: (*decompressor).nextBlock, - } - f.dict.init(maxMatchOffset, dict) - return nil -} - -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. 
-func NewReader(r io.Reader) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock - f.dict.init(maxMatchOffset, nil) - return &f -} - -// NewReaderDict is like NewReader but initializes the reader -// with a preset dictionary. The returned Reader behaves as if -// the uncompressed data stream started with the given dictionary, -// which has already been read. NewReaderDict is typically used -// to read data compressed by NewWriterDict. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock - f.dict.init(maxMatchOffset, dict) - return &f -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go deleted file mode 100644 index 61342b6b..00000000 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ /dev/null @@ -1,1283 +0,0 @@ -// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( - "bufio" - "bytes" - "fmt" - "math/bits" - "strings" -) - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesBuffer() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Buffer) - - // Optimization. 
Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := 
uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. 
- { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBufioReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bufio.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanStringsReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*strings.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanGenericReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -func (f *decompressor) huffmanBlockDecoder() func() { - switch f.r.(type) { - case *bytes.Buffer: - return f.huffmanBytesBuffer - case *bytes.Reader: - return f.huffmanBytesReader - case *bufio.Reader: - return f.huffmanBufioReader - case *strings.Reader: - return f.huffmanStringsReader - case Reader: - return f.huffmanGenericReader - default: - return f.huffmanGenericReader - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go deleted file mode 100644 index 703b9a89..00000000 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ /dev/null @@ -1,241 +0,0 @@ -package flate - -import ( - "encoding/binary" - "fmt" - "math/bits" -) - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastEncL1 struct { - fastGen - table [tableSize]tableEntry -} - -// EncodeL1 uses a similar algorithm to level 1 -func (e *fastEncL1) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - candidate = e.table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, tableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... 
- cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - cv = now - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - var l = int32(4) - if false { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else { - // inlined: - a := src[s+4:] - b := src[t+4:] - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - l += int32(bits.TrailingZeros64(diff) >> 3) - break - } - l += 8 - a = a[8:] - b = b[8:] - } - if len(a) < 8 { - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - break - } - l++ - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - // Save the match found - if false { - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - } else { - // Inlined... 
- xoffset := uint32(s - t - baseMatchOffset) - xlength := l - oc := offsetCode(xoffset) - xoffset |= oc << 16 - for xlength > 0 { - xl := xlength - if xl > 258 { - if xl > 258+baseMatchLength { - xl = 258 - } else { - xl = 258 - baseMatchLength - } - } - xlength -= xl - xl -= baseMatchLength - dst.extraHist[lengthCodes1[uint8(xl)]]++ - dst.offHist[oc]++ - dst.tokens[dst.n] = token(matchType | uint32(xl)<= s { - s = nextS + 1 - } - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, tableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashLen(x, tableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = x >> 8 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go deleted file mode 100644 index 876dfbe3..00000000 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ /dev/null @@ -1,214 +0,0 @@ -package flate - -import "fmt" - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastEncL2 struct { - fastGen - table [bTableSize]tableEntry -} - -// EncodeL2 uses a similar algorithm to level 1, but is capable -// of matching across blocks giving better compression at a small slowdown. -func (e *fastEncL2) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. 
- sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - // When should we start skipping if we haven't found matches in a long while. - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, bTableBits, hashBytes) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidate = e.table[nextHash] - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, bTableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... - cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - cv = now - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. 
- t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every second hash in-between, but offset by 1. - for i := s - l + 2; i < s-5; i += 7 { - x := load6432(src, i) - nextHash := hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4} - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. 
- x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, bTableBits, hashBytes) - prevHash2 := hashLen(x>>8, bTableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hashLen(x>>16, bTableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = x >> 24 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go deleted file mode 100644 index 7aa2b72a..00000000 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ /dev/null @@ -1,241 +0,0 @@ -package flate - -import "fmt" - -// fastEncL3 -type fastEncL3 struct { - fastGen - table [1 << 16]tableEntryPrev -} - -// Encode uses a similar algorithm to level 2, will check up to two candidates. -func (e *fastEncL3) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - tableBits = 16 - tableSize = 1 << tableBits - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - } - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - e.table[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // Skip if too small. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 7 - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - s = nextS - nextS = s + 1 + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash] - now := load6432(src, nextS) - - // Safe offset distance until s + 4... - minOffset := e.cur + s - (maxMatchOffset - 4) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} - - // Check both candidates - candidate = candidates.Cur - if candidate.offset < minOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { - break - } - // Both match and are valid, pick longest. 
- offset := s - (candidate.offset - e.cur) - o2 := s - (candidates.Prev.offset - e.cur) - l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) - if l2 > l1 { - candidate = candidates.Prev - } - break - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - } - cv = now - } - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+8) < len(src) && t > 0 { - cv = load6432(src, t) - nextHash := hashLen(cv, tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t}, - } - } - goto emitRemainder - } - - // Store every 5th hash in-between. 
- for i := s - l + 2; i < s-5; i += 6 { - nextHash := hashLen(load6432(src, i), tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + i}} - } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. - x := load6432(src, s-2) - prevHash := hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2}, - } - x >>= 8 - prevHash = hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1}, - } - x >>= 8 - currHash := hashLen(x, tableBits, hashBytes) - candidates := e.table[currHash] - cv = x - e.table[currHash] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur}, - } - - // Check both candidates - candidate = candidates.Cur - minOffset := e.cur + s - (maxMatchOffset - 4) - - if candidate.offset > minOffset { - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Found a match... - continue - } - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Match at prev... - continue - } - } - cv = x >> 8 - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go deleted file mode 100644 index 23c08b32..00000000 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ /dev/null @@ -1,221 +0,0 @@ -package flate - -import "fmt" - -type fastEncL4 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntry -} - -func (e *fastEncL4) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.bTable[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. 
- sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - e.bTable[nextHashL] = entry - - t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { - // We got a long match. Use that. - break - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - lCandidate = e.bTable[hash7(next, tableBits)] - - // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { - l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) - if l2 > l1 { - s = nextS - t = lCandidate.offset - e.cur - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. 
- l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic("s-t") - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every 3rd hash in-between - if true { - i := nextS - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - - i += 3 - for ; i < s-1; i += 3 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - e.bTable[prevHashL] = tableEntry{offset: o} - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go deleted file mode 100644 index 1f61ec18..00000000 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ /dev/null @@ -1,708 +0,0 @@ -package flate - -import "fmt" - -type fastEncL5 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. 
- dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - 
t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... - t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. 
- if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} - -// fastEncL5Window is a level 5 encoder, -// but with a custom window size. 
-type fastEncL5Window struct { - hist []byte - cur int32 - maxOffset int32 - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - maxMatchOffset := e.maxOffset - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... 
- l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... - t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. 
- if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} - -// Reset the encoding table. -func (e *fastEncL5Window) Reset() { - // We keep the same allocs, since we are compressing the same block sizes. - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
- if e.cur <= int32(bufferReset) { - e.cur += e.maxOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} - -func (e *fastEncL5Window) addBlock(src []byte) int32 { - // check if we have space already - maxMatchOffset := e.maxOffset - - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < int(maxMatchOffset*2) { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > e.maxOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). 
-func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > e.maxOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go deleted file mode 100644 index f1e9d98f..00000000 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ /dev/null @@ -1,325 +0,0 @@ -package flate - -import "fmt" - -type fastEncL6 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL6) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - // Repeat MUST be > 1 and within range - repeat := int32(1) - for { - const skipLog = 7 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - // Calculate hashes of 'next' - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Long candidate matches at least 4 bytes. - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check the previous long candidate as well. - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - // Current value did not match, but check if previous long value does. 
- t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - - // Look up next long candidate (at nextS) - lCandidate = e.bTable[nextHashL] - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check repeat at s + repOff - const repOff = 1 - t2 := s - repeat + repOff - if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 - if ml > l { - t = t2 - l = ml - s += repOff - // Not worth checking more. - break - } - } - - // If the next long is a candidate, use that... - t2 = lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - // This is ok, but check previous as well. - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. 
- if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end-of-match... - if sAt := s + l; sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] - // Test current - t2 := eLong.Cur.offset - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if off < maxMatchOffset { - if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - // Test next: - t2 = eLong.Prev.offset - e.cur - l + skipBeginning - off := s2 - t2 - if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if false { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - repeat = s - t - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index after match end. 
- for i := nextS + 1; i < int32(len(src))-8; i += 2 { - cv := load6432(src, i) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur - } - goto emitRemainder - } - - // Store every long hash in-between and every second short. - if true { - for i := nextS + 1; i < s-1; i += 2 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong.Cur, eLong.Prev = t, eLong.Cur - eLong2.Cur, eLong2.Prev = t2, eLong2.Cur - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - cv = load6432(src, s) - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go deleted file mode 100644 index 4bd38858..00000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package flate - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s deleted file mode 100644 index 9a7655c0..00000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ /dev/null @@ -1,68 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go deleted file mode 100644 index ad5cd814..00000000 --- 
a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go deleted file mode 100644 index 6ed28061..00000000 --- a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go +++ /dev/null @@ -1,37 +0,0 @@ -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. 
- - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 7 - reg8SizeMask16 = 15 - reg8SizeMask32 = 31 - reg8SizeMask64 = 63 - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = reg8SizeMask8 - reg16SizeMask16 = reg8SizeMask16 - reg16SizeMask32 = reg8SizeMask32 - reg16SizeMask64 = reg8SizeMask64 - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = reg8SizeMask8 - reg32SizeMask16 = reg8SizeMask16 - reg32SizeMask32 = reg8SizeMask32 - reg32SizeMask64 = reg8SizeMask64 - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = reg8SizeMask8 - reg64SizeMask16 = reg8SizeMask16 - reg64SizeMask32 = reg8SizeMask32 - reg64SizeMask64 = reg8SizeMask64 - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = reg8SizeMask8 - regSizeMaskUint16 = reg8SizeMask16 - regSizeMaskUint32 = reg8SizeMask32 - regSizeMaskUint64 = reg8SizeMask64 -) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go deleted file mode 100644 index 1b7a2cbd..00000000 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !amd64 -// +build !amd64 - -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. 
- - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 0xff - reg8SizeMask16 = 0xff - reg8SizeMask32 = 0xff - reg8SizeMask64 = 0xff - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = 0xffff - reg16SizeMask16 = 0xffff - reg16SizeMask32 = 0xffff - reg16SizeMask64 = 0xffff - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = 0xffffffff - reg32SizeMask16 = 0xffffffff - reg32SizeMask32 = 0xffffffff - reg32SizeMask64 = 0xffffffff - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = 0xffffffffffffffff - reg64SizeMask16 = 0xffffffffffffffff - reg64SizeMask32 = 0xffffffffffffffff - reg64SizeMask64 = 0xffffffffffffffff - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = ^uint(0) - regSizeMaskUint16 = ^uint(0) - regSizeMaskUint32 = ^uint(0) - regSizeMaskUint64 = ^uint(0) -) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go deleted file mode 100644 index f3d4139e..00000000 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ /dev/null @@ -1,318 +0,0 @@ -package flate - -import ( - "io" - "math" - "sync" -) - -const ( - maxStatelessBlock = math.MaxInt16 - // dictionary will be taken from maxStatelessBlock, so limit it. 
- maxStatelessDict = 8 << 10 - - slTableBits = 13 - slTableSize = 1 << slTableBits - slTableShift = 32 - slTableBits -) - -type statelessWriter struct { - dst io.Writer - closed bool -} - -func (s *statelessWriter) Close() error { - if s.closed { - return nil - } - s.closed = true - // Emit EOF block - return StatelessDeflate(s.dst, nil, true, nil) -} - -func (s *statelessWriter) Write(p []byte) (n int, err error) { - err = StatelessDeflate(s.dst, p, false, nil) - if err != nil { - return 0, err - } - return len(p), nil -} - -func (s *statelessWriter) Reset(w io.Writer) { - s.dst = w - s.closed = false -} - -// NewStatelessWriter will do compression but without maintaining any state -// between Write calls. -// There will be no memory kept between Write calls, -// but compression and speed will be suboptimal. -// Because of this, the size of actual Write calls will affect output size. -func NewStatelessWriter(dst io.Writer) io.WriteCloser { - return &statelessWriter{dst: dst} -} - -// bitWriterPool contains bit writers that can be reused. -var bitWriterPool = sync.Pool{ - New: func() interface{} { - return newHuffmanBitWriter(nil) - }, -} - -// StatelessDeflate allows compressing directly to a Writer without retaining state. -// When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. -// Longer dictionaries will be truncated and will still produce valid output. -// Sending nil dictionary is perfectly fine. -func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens - bw := bitWriterPool.Get().(*huffmanBitWriter) - bw.reset(out) - defer func() { - // don't keep a reference to our output - bw.reset(nil) - bitWriterPool.Put(bw) - }() - if eof && len(in) == 0 { - // Just write an EOF block. - // Could be faster... 
- bw.writeStoredHeader(0, true) - bw.flush() - return bw.err - } - - // Truncate dict - if len(dict) > maxStatelessDict { - dict = dict[len(dict)-maxStatelessDict:] - } - - // For subsequent loops, keep shallow dict reference to avoid alloc+copy. - var inDict []byte - - for len(in) > 0 { - todo := in - if len(inDict) > 0 { - if len(todo) > maxStatelessBlock-maxStatelessDict { - todo = todo[:maxStatelessBlock-maxStatelessDict] - } - } else if len(todo) > maxStatelessBlock-len(dict) { - todo = todo[:maxStatelessBlock-len(dict)] - } - inOrg := in - in = in[len(todo):] - uncompressed := todo - if len(dict) > 0 { - // combine dict and source - bufLen := len(todo) + len(dict) - combined := make([]byte, bufLen) - copy(combined, dict) - copy(combined[len(dict):], todo) - todo = combined - } - // Compress - if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) - } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) - } - isEof := eof && len(in) == 0 - - if dst.n == 0 { - bw.writeStoredHeader(len(uncompressed), isEof) - if bw.err != nil { - return bw.err - } - bw.writeBytes(uncompressed) - } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { - // If we removed less than 1/16th, huffman compress the block. - bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) - } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) - } - if len(in) > 0 { - // Retain a dict if we have more - inDict = inOrg[len(uncompressed)-maxStatelessDict:] - dict = nil - dst.Reset() - } - if bw.err != nil { - return bw.err - } - } - if !eof { - // Align, only a stored block can do that. - bw.writeStoredHeader(0, false) - } - bw.flush() - return bw.err -} - -func hashSL(u uint32) uint32 { - return (u * 0x1e35a7bd) >> slTableShift -} - -func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
- b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func statelessEnc(dst *tokens, src []byte, startAt int16) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - type tableEntry struct { - offset int16 - } - - var table [slTableSize]tableEntry - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src)-int(startAt) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = 0 - return - } - // Index until startAt - if startAt > 0 { - cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { - table[hashSL(cv)] = tableEntry{offset: i} - cv = (cv >> 8) | (uint32(src[i+4]) << 24) - } - } - - s := startAt + 1 - nextEmit := startAt - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int16(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load3216(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashSL(cv) - candidate = table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit || nextS <= 0 { - goto emitRemainder - } - - now := load6416(src, nextS) - table[nextHash] = tableEntry{offset: s} - nextHash = hashSL(uint32(now)) - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - - // Do one right away... - cv = uint32(now) - s = nextS - nextS++ - candidate = table[nextHash] - now >>= 8 - table[nextHash] = tableEntry{offset: s} - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - cv = uint32(now) - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - l := int16(matchLen(src[s+4:], src[t+4:]) + 4) - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - // Save the match found - dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. 
At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6416(src, s-2) - o := s - 2 - prevHash := hashSL(uint32(x)) - table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashSL(uint32(x)) - candidate = table[currHash] - table[currHash] = tableEntry{offset: o + 2} - - if uint32(x) != load3216(src, candidate.offset) { - cv = uint32(x >> 8) - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go deleted file mode 100644 index d818790c..00000000 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package flate - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits - // bits 16-22 offsetcode - 5 bits - // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits - // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits - lengthShift = 22 - offsetMask = 1<maxnumlit - offHist [32]uint16 // offset codes - litHist [256]uint16 // codes 0->255 - nFilled int - n uint16 // Must be able to contain maxStoreBlockSize - tokens [maxStoreBlockSize + 1]token -} - -func (t *tokens) Reset() { - if t.n == 0 { - return - } - t.n = 0 - t.nFilled = 0 - for i := range t.litHist[:] { - t.litHist[i] = 0 - } - for i := range t.extraHist[:] { - t.extraHist[i] = 0 - } - for i := range t.offHist[:] { - t.offHist[i] = 0 - } -} - -func (t *tokens) Fill() { - if t.n == 0 { - return - } - for i, v := range t.litHist[:] { - if v == 0 { - t.litHist[i] = 1 - t.nFilled++ - } - } - for i, v := range t.extraHist[:literalCount-256] { - if v == 0 { - t.nFilled++ - t.extraHist[i] = 1 - } - } - for i, v := range t.offHist[:offsetCodeCount] { - if v == 0 { - t.offHist[i] = 1 - } - } -} - -func indexTokens(in []token) tokens { - var t tokens - t.indexTokens(in) - return t -} - -func (t *tokens) indexTokens(in []token) { - t.Reset() - for _, tok := range in { - if tok < matchType { - t.AddLiteral(tok.literal()) - continue - } - t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) - } -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-func emitLiteral(dst *tokens, lit []byte) { - for _, v := range lit { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } -} - -func (t *tokens) AddLiteral(lit byte) { - t.tokens[t.n] = token(lit) - t.litHist[lit]++ - t.n++ -} - -// from https://stackoverflow.com/a/28730362 -func mFastLog2(val float32) float32 { - ux := int32(math.Float32bits(val)) - log2 := (float32)(((ux >> 23) & 255) - 128) - ux &= -0x7f800001 - ux += 127 << 23 - uval := math.Float32frombits(uint32(ux)) - log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 - return log2 -} - -// EstimatedBits will return an minimum size estimated by an *optimal* -// compression of the block. -// The size of the block -func (t *tokens) EstimatedBits() int { - shannon := float32(0) - bits := int(0) - nMatches := 0 - total := int(t.n) + t.nFilled - if total > 0 { - invTotal := 1.0 / float32(total) - for _, v := range t.litHist[:] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } - } - // Just add 15 for EOB - shannon += 15 - for i, v := range t.extraHist[1 : literalCount-256] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(lengthExtraBits[i&31]) * int(v) - nMatches += int(v) - } - } - } - if nMatches > 0 { - invTotal := 1.0 / float32(nMatches) - for i, v := range t.offHist[:offsetCodeCount] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(offsetExtraBits[i&31]) * int(v) - } - } - } - return int(shannon) + bits -} - -// AddMatch adds a match to the tokens. -// This function is very sensitive to inlining and right on the border. 
-func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { - if debugDeflate { - if xlength >= maxMatchLength+baseMatchLength { - panic(fmt.Errorf("invalid length: %v", xlength)) - } - if xoffset >= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oCode := offsetCode(xoffset) - xoffset |= oCode << 16 - - t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode&31]++ - t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oc := offsetCode(xoffset) - xoffset |= oc << 16 - for xlength > 0 { - xl := xlength - if xl > 258 { - // We need to have at least baseMatchLength left over for next loop. - if xl > 258+baseMatchLength { - xl = 258 - } else { - xl = 258 - baseMatchLength - } - } - xlength -= xl - xl -= baseMatchLength - t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc&31]++ - t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } - -// Convert length to code. 
-func lengthCode(len uint8) uint8 { return lengthCodes[len] } - -// Returns the offset code corresponding to a specific offset -func offsetCode(off uint32) uint32 { - if false { - if off < uint32(len(offsetCodes)) { - return offsetCodes[off&255] - } else if off>>7 < uint32(len(offsetCodes)) { - return offsetCodes[(off>>7)&255] + 14 - } else { - return offsetCodes[(off>>14)&255] + 28 - } - } - if off < uint32(len(offsetCodes)) { - return offsetCodes[uint8(off)] - } - return offsetCodes14[uint8(off>>7)] -} diff --git a/vendor/github.com/mainflux/agent/pkg/agent/config.go b/vendor/github.com/mainflux/agent/pkg/agent/config.go deleted file mode 100644 index 9f82c3b2..00000000 --- a/vendor/github.com/mainflux/agent/pkg/agent/config.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package agent - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "os" - "time" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/pelletier/go-toml" -) - -type ServerConfig struct { - Port string `toml:"port" json:"port"` - NatsURL string `toml:"nats_url" json:"nats_url"` -} - -type ChanConfig struct { - Control string `toml:"control"` - Data string `toml:"data"` -} - -type EdgexConfig struct { - URL string `toml:"url"` -} - -type LogConfig struct { - Level string `toml:"level"` -} - -type MQTTConfig struct { - URL string `json:"url" toml:"url"` - Username string `json:"username" toml:"username" mapstructure:"username"` - Password string `json:"password" toml:"password" mapstructure:"password"` - MTLS bool `json:"mtls" toml:"mtls" mapstructure:"mtls"` - SkipTLSVer bool `json:"skip_tls_ver" toml:"skip_tls_ver" mapstructure:"skip_tls_ver"` - Retain bool `json:"retain" toml:"retain" mapstructure:"retain"` - QoS byte `json:"qos" toml:"qos" mapstructure:"qos"` - CAPath string `json:"ca_path" toml:"ca_path" mapstructure:"ca_path"` - CertPath string `json:"cert_path" toml:"cert_path" mapstructure:"cert_path"` - PrivKeyPath string 
`json:"priv_key_path" toml:"priv_key_path" mapstructure:"priv_key_path"` - CA []byte `json:"-" toml:"-"` - Cert tls.Certificate `json:"-" toml:"-"` - ClientCert string `json:"client_cert" toml:"client_cert"` - ClientKey string `json:"client_key" toml:"client_key"` - CaCert string `json:"ca_cert" toml:"ca_cert"` -} - -type HeartbeatConfig struct { - Interval time.Duration `toml:"interval"` -} - -type TerminalConfig struct { - SessionTimeout time.Duration `toml:"session_timeout" json:"session_timeout"` -} - -type Config struct { - Server ServerConfig `toml:"server" json:"server"` - Terminal TerminalConfig `toml:"terminal" json:"terminal"` - Heartbeat HeartbeatConfig `toml:"heartbeat" json:"heartbeat"` - Channels ChanConfig `toml:"channels" json:"channels"` - Edgex EdgexConfig `toml:"edgex" json:"edgex"` - Log LogConfig `toml:"log" json:"log"` - MQTT MQTTConfig `toml:"mqtt" json:"mqtt"` - File string -} - -func NewConfig(sc ServerConfig, cc ChanConfig, ec EdgexConfig, lc LogConfig, mc MQTTConfig, hc HeartbeatConfig, tc TerminalConfig, file string) Config { - return Config{ - Server: sc, - Channels: cc, - Edgex: ec, - Log: lc, - MQTT: mc, - Heartbeat: hc, - Terminal: tc, - File: file, - } -} - -// Save - store config in a file. -func SaveConfig(c Config) error { - b, err := toml.Marshal(c) - if err != nil { - return errors.New(fmt.Sprintf("Error reading config file: %s", err)) - } - if err := os.WriteFile(c.File, b, 0644); err != nil { - return errors.New(fmt.Sprintf("Error writing toml: %s", err)) - } - return nil -} - -// Read - retrieve config from a file. -func ReadConfig(file string) (Config, error) { - data, err := os.ReadFile(file) - c := Config{} - if err != nil { - return c, errors.New(fmt.Sprintf("Error reading config file: %s", err)) - } - - if err := toml.Unmarshal(data, &c); err != nil { - return Config{}, errors.New(fmt.Sprintf("Error unmarshaling toml: %s", err)) - } - return c, nil -} - -// UnmarshalJSON parses the duration from JSON. 
-func (d *HeartbeatConfig) UnmarshalJSON(b []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - interval, ok := v["interval"] - if !ok { - return errors.New("missing value") - } - switch value := interval.(type) { - case float64: - d.Interval = time.Duration(value) - return nil - case string: - var err error - d.Interval, err = time.ParseDuration(value) - if err != nil { - return err - } - return nil - default: - return errors.New("invalid duration") - } -} - -// UnmarshalJSON parses the duration from JSON. -func (d *TerminalConfig) UnmarshalJSON(b []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - session_timeout, ok := v["session_timeout"] - if !ok { - return errors.New("missing value") - } - switch value := session_timeout.(type) { - case float64: - d.SessionTimeout = time.Duration(value) - return nil - case string: - var err error - d.SessionTimeout, err = time.ParseDuration(value) - if err != nil { - return err - } - return nil - default: - return errors.New("invalid duration") - } -} diff --git a/vendor/github.com/mainflux/agent/pkg/agent/heartbeat.go b/vendor/github.com/mainflux/agent/pkg/agent/heartbeat.go deleted file mode 100644 index 3d9b2efc..00000000 --- a/vendor/github.com/mainflux/agent/pkg/agent/heartbeat.go +++ /dev/null @@ -1,79 +0,0 @@ -package agent - -import ( - "sync" - "time" -) - -const ( - online = "online" - offline = "offline" -) - -// svc keeps info on service live status. -// Services send heartbeat to nats thus updating last seen. -// When service doesnt send heartbeat for some time gets marked offline. 
-type svc struct { - info Info - interval time.Duration - ticker *time.Ticker - mu sync.Mutex -} - -type Info struct { - Name string `json:"name"` - LastSeen time.Time `json:"last_seen"` - Status string `json:"status"` - Type string `json:"type"` - Terminal int `json:"terminal"` -} - -// Heartbeat specifies api for updating status and keeping track on services -// that are sending heartbeat to NATS. -type Heartbeat interface { - Update() - Info() Info -} - -// interval - duration of interval -// if service doesnt send heartbeat during interval it is marked offline. -func NewHeartbeat(name, svcType string, interval time.Duration) Heartbeat { - ticker := time.NewTicker(interval) - s := svc{ - info: Info{ - Name: name, - Status: online, - Type: svcType, - LastSeen: time.Now(), - }, - ticker: ticker, - interval: interval, - } - s.listen() - return &s -} - -func (s *svc) listen() { - go func() { - for range s.ticker.C { - // TODO - we can disable ticker when the status gets OFFLINE - // and on the next heartbeat enable it again. 
- s.mu.Lock() - if time.Now().After(s.info.LastSeen.Add(s.interval)) { - s.info.Status = offline - } - s.mu.Unlock() - } - }() -} - -func (s *svc) Update() { - s.mu.Lock() - defer s.mu.Unlock() - s.info.LastSeen = time.Now() - s.info.Status = online -} - -func (s *svc) Info() Info { - return s.info -} diff --git a/vendor/github.com/mainflux/agent/pkg/agent/service.go b/vendor/github.com/mainflux/agent/pkg/agent/service.go deleted file mode 100644 index 5daa65a1..00000000 --- a/vendor/github.com/mainflux/agent/pkg/agent/service.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package agent - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "os/exec" - "sort" - "strings" - "time" - - paho "github.com/eclipse/paho.mqtt.golang" - "github.com/mainflux/agent/pkg/edgex" - "github.com/mainflux/agent/pkg/encoder" - "github.com/mainflux/agent/pkg/terminal" - - exp "github.com/mainflux/export/pkg/config" - log "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/nats-io/nats.go" -) - -const ( - Path = "./config.toml" - Hearbeat = "heartbeat.>" - Commands = "commands" - config = "config" - - view = "view" - save = "save" - - char = "c" - open = "open" - close = "close" - control = "control" - data = "data" - - export = "export" -) - -var ( - // errInvalidCommand indicates malformed command. - errInvalidCommand = errors.New("invalid command") - - // ErrMalformedEntity indicates malformed entity specification. - ErrMalformedEntity = errors.New("malformed entity specification") - - // ErrInvalidQueryParams indicates malformed URL. - ErrInvalidQueryParams = errors.New("invalid query params") - - // errUnknownCommand indicates that command is not found. - errUnknownCommand = errors.New("Unknown command") - - // errNatsSubscribing indicates problem with sub to topic for heartbeat. 
- errNatsSubscribing = errors.New("failed to subscribe to heartbeat topic") - - // errNoSuchService indicates service not supported. - errNoSuchService = errors.New("no such service") - - // errFailedEncode indicates error in encoding. - errFailedEncode = errors.New("failed to encode") - - // errFailedToPublish. - errFailedToPublish = errors.New("failed to publish") - - // errEdgexFailed. - errEdgexFailed = errors.New("failed to execute edgex operation") - - // errFailedExecute. - errFailedExecute = errors.New("failed to execute command") - - // errFailedToCreateTerminalSession. - errFailedToCreateTerminalSession = errors.New("failed to create terminal session") - - // errNoSuchTerminalSession terminal session doesnt exist error on closing. - errNoSuchTerminalSession = errors.New("no such terminal session") -) - -// Service specifies API for publishing messages and subscribing to topics. -type Service interface { - // Execute command. - Execute(string, string) (string, error) - - // Control command. - Control(string, string) error - - // Update configuration file. - AddConfig(Config) error - - // Config returns Config struct created from config file. - Config() Config - - // Saves config file. - ServiceConfig(uuid, cmdStr string) error - - // Services returns service list. - Services() []Info - - // Terminal used for terminal control of gateway. - Terminal(string, string) error - - // Publish message. - Publish(string, string) error -} - -var _ Service = (*agent)(nil) - -type agent struct { - mqttClient paho.Client - config *Config - edgexClient edgex.Client - logger log.Logger - nats *nats.Conn - svcs map[string]Heartbeat - terminals map[string]terminal.Session -} - -// New returns agent service implementation. 
-func New(mc paho.Client, cfg *Config, ec edgex.Client, nc *nats.Conn, logger log.Logger) (Service, error) { - ag := &agent{ - mqttClient: mc, - edgexClient: ec, - config: cfg, - nats: nc, - logger: logger, - svcs: make(map[string]Heartbeat), - terminals: make(map[string]terminal.Session), - } - - if cfg.Heartbeat.Interval <= 0 { - ag.logger.Error(fmt.Sprintf("invalid heartbeat interval %d", cfg.Heartbeat.Interval)) - } - - _, err := ag.nats.Subscribe(Hearbeat, func(msg *nats.Msg) { - sub := msg.Subject - tok := strings.Split(sub, ".") - if len(tok) < 3 { - ag.logger.Error(fmt.Sprintf("Failed: Subject has incorrect length %s", sub)) - return - } - svcname := tok[1] - svctype := tok[2] - // Service name is extracted from the subtopic - // if there is multiple instances of the same service - // we will have to add another distinction. - if _, ok := ag.svcs[svcname]; !ok { - svc := NewHeartbeat(svcname, svctype, cfg.Heartbeat.Interval) - ag.svcs[svcname] = svc - ag.logger.Info(fmt.Sprintf("Services '%s-%s' registered", svcname, svctype)) - } - serv := ag.svcs[svcname] - serv.Update() - }) - - if err != nil { - return ag, errors.Wrap(errNatsSubscribing, err) - } - - return ag, nil - -} - -func (a *agent) Execute(uuid, cmd string) (string, error) { - cmdArr := strings.Split(strings.ReplaceAll(cmd, " ", ""), ",") - if len(cmdArr) < 2 { - return "", errInvalidCommand - } - - out, err := exec.Command(cmdArr[0], cmdArr[1:]...).CombinedOutput() - if err != nil { - return "", errors.Wrap(errFailedExecute, err) - } - - payload, err := encoder.EncodeSenML(uuid, cmdArr[0], string(out)) - if err != nil { - return "", errors.Wrap(errFailedEncode, err) - } - - if err := a.Publish(control, string(payload)); err != nil { - return "", errors.Wrap(errFailedToPublish, err) - } - - return string(payload), nil -} - -func (a *agent) Control(uuid, cmdStr string) error { - cmdArgs := strings.Split(strings.ReplaceAll(cmdStr, " ", ""), ",") - if len(cmdArgs) < 2 { - return errInvalidCommand - 
} - - var resp string - var err error - - cmd := cmdArgs[0] - switch cmd { - case "edgex-operation": - resp, err = a.edgexClient.PushOperation(cmdArgs[1:]) - case "edgex-config": - resp, err = a.edgexClient.FetchConfig(cmdArgs[1:]) - case "edgex-metrics": - resp, err = a.edgexClient.FetchMetrics(cmdArgs[1:]) - case "edgex-ping": - resp, err = a.edgexClient.Ping() - default: - err = errUnknownCommand - } - - if err != nil { - return errors.Wrap(errEdgexFailed, err) - } - - return a.processResponse(uuid, cmd, resp) -} - -// Message for this command -// [{"bn":"1:", "n":"services", "vs":"view"}] -// [{"bn":"1:", "n":"config", "vs":"save, export, filename, filecontent"}] -// config_file_content is base64 encoded marshaled structure representing service conf -// Example of creation: -// -// b, _ := toml.Marshal(cfg) -// config_file_content := base64.StdEncoding.EncodeToString(b). -func (a *agent) ServiceConfig(uuid, cmdStr string) error { - cmdArgs := strings.Split(strings.ReplaceAll(cmdStr, " ", ""), ",") - if len(cmdArgs) < 1 { - return errInvalidCommand - } - resp := "" - cmd := cmdArgs[0] - switch cmd { - case view: - services, err := json.Marshal(a.Services()) - if err != nil { - return errors.New(err.Error()) - } - resp = string(services) - case save: - if len(cmdArgs) < 4 { - return errInvalidCommand - } - service := cmdArgs[1] - fileName := cmdArgs[2] - fileCont := cmdArgs[3] - if err := a.saveConfig(service, fileName, fileCont); err != nil { - return err - } - } - return a.processResponse(uuid, cmd, resp) -} - -func (a *agent) Terminal(uuid, cmdStr string) error { - b, err := base64.StdEncoding.DecodeString(cmdStr) - if err != nil { - return errors.New(err.Error()) - } - cmdArgs := strings.Split(string(b), ",") - if len(cmdArgs) < 1 { - return errInvalidCommand - } - - cmd := cmdArgs[0] - ch := "" - if len(cmdArgs) > 1 { - ch = cmdArgs[1] - } - switch cmd { - case char: - if err := a.terminalWrite(uuid, ch); err != nil { - return err - } - case open: - if err 
:= a.terminalOpen(uuid, a.config.Terminal.SessionTimeout); err != nil { - return err - } - case close: - if err := a.terminalClose(uuid); err != nil { - return err - } - } - return nil -} - -func (a *agent) terminalOpen(uuid string, timeout time.Duration) error { - if _, ok := a.terminals[uuid]; !ok { - term, err := terminal.NewSession(uuid, timeout, a.Publish, a.logger) - if err != nil { - return errors.Wrap(errors.Wrap(errFailedToCreateTerminalSession, fmt.Errorf(" for %s", uuid)), err) - } - a.terminals[uuid] = term - go func() { - for range term.IsDone() { - // Terminal is inactive, should be closed. - a.logger.Debug((fmt.Sprintf("Closing terminal session %s", uuid))) - a.terminalClose(uuid) - delete(a.terminals, uuid) - return - } - }() - } - a.logger.Debug(fmt.Sprintf("Opened terminal session %s", uuid)) - return nil -} - -func (a *agent) terminalClose(uuid string) error { - if _, ok := a.terminals[uuid]; ok { - delete(a.terminals, uuid) - a.logger.Debug(fmt.Sprintf("Terminal session: %s closed", uuid)) - return nil - } - return errors.Wrap(errNoSuchTerminalSession, fmt.Errorf("session :%s", uuid)) -} - -func (a *agent) terminalWrite(uuid, cmd string) error { - if err := a.terminalOpen(uuid, a.config.Terminal.SessionTimeout); err != nil { - return err - } - term := a.terminals[uuid] - p := []byte(cmd) - return term.Send(p) -} - -func (a *agent) processResponse(uuid, cmd, resp string) error { - payload, err := encoder.EncodeSenML(uuid, cmd, resp) - if err != nil { - return errors.Wrap(errFailedEncode, err) - } - if err := a.Publish(control, string(payload)); err != nil { - return errors.Wrap(errFailedToPublish, err) - } - return nil -} - -func (a *agent) saveConfig(service, fileName, fileCont string) error { - switch service { - case export: - content, err := base64.StdEncoding.DecodeString(fileCont) - if err != nil { - return errors.New(err.Error()) - } - c, err := exp.ReadBytes([]byte(content)) - if err != nil { - return errors.New(err.Error()) - } - c.File 
= fileName - if err := exp.Save(c); err != nil { - return errors.New(err.Error()) - } - - default: - return errNoSuchService - } - - return a.nats.Publish(fmt.Sprintf("%s.%s.%s", Commands, service, config), []byte("")) -} - -func (a *agent) AddConfig(c Config) error { - err := SaveConfig(c) - return errors.New(err.Error()) -} - -func (a *agent) Config() Config { - return *a.config -} - -func (a *agent) Services() []Info { - svcInfos := []Info{} - keys := []string{} - for k := range a.svcs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - service := a.svcs[key].Info() - svcInfos = append(svcInfos, service) - } - return svcInfos -} - -func (a *agent) Publish(t, payload string) error { - topic := a.getTopic(t) - mqtt := a.config.MQTT - token := a.mqttClient.Publish(topic, mqtt.QoS, mqtt.Retain, payload) - token.Wait() - err := token.Error() - if err != nil { - return errors.New(err.Error()) - } - return nil -} - -func (a *agent) getTopic(topic string) (t string) { - switch topic { - case control: - t = fmt.Sprintf("channels/%s/messages/res", a.config.Channels.Control) - case data: - t = fmt.Sprintf("channels/%s/messages/res", a.config.Channels.Data) - default: - t = fmt.Sprintf("channels/%s/messages/res/%s", a.config.Channels.Control, topic) - } - return t -} diff --git a/vendor/github.com/mainflux/agent/pkg/bootstrap/bootstrap.go b/vendor/github.com/mainflux/agent/pkg/bootstrap/bootstrap.go deleted file mode 100644 index a24ceef2..00000000 --- a/vendor/github.com/mainflux/agent/pkg/bootstrap/bootstrap.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "io" - "os" - - "fmt" - "net/http" - "strconv" - "time" - - "github.com/mainflux/agent/pkg/agent" - - export "github.com/mainflux/export/pkg/config" - "github.com/mainflux/mainflux/bootstrap" - log "github.com/mainflux/mainflux/logger" - errors 
"github.com/mainflux/mainflux/pkg/errors" -) - -const exportConfigFile = "/configs/export/config.toml" - -// Config represents the parameters for bootstrapping. -type Config struct { - URL string - ID string - Key string - Retries string - RetryDelaySec string - Encrypt string - SkipTLS bool -} - -type ServicesConfig struct { - Agent agent.Config `json:"agent"` - Export export.Config `json:"export"` -} - -type ConfigContent struct { - Content string `json:"content"` -} - -type deviceConfig struct { - MainfluxID string `json:"mainflux_id"` - MainfluxKey string `json:"mainflux_key"` - MainfluxChannels []bootstrap.Channel `json:"mainflux_channels"` - ClientKey string `json:"client_key"` - ClientCert string `json:"client_cert"` - CaCert string `json:"ca_cert"` - SvcsConf ServicesConfig `json:"-"` -} - -// Bootstrap - Retrieve device config. -func Bootstrap(cfg Config, logger log.Logger, file string) error { - retries, err := strconv.ParseUint(cfg.Retries, 10, 64) - if err != nil { - return errors.New(fmt.Sprintf("Invalid BOOTSTRAP_RETRIES value: %s", err)) - } - - if retries == 0 { - logger.Info("No bootstrapping, environment variables will be used") - return nil - } - - retryDelaySec, err := strconv.ParseUint(cfg.RetryDelaySec, 10, 64) - if err != nil { - return errors.New(fmt.Sprintf("Invalid BOOTSTRAP_RETRY_DELAY_SECONDS value: %s", err)) - } - - logger.Info(fmt.Sprintf("Requesting config for %s from %s", cfg.ID, cfg.URL)) - - dc := deviceConfig{} - - for i := 0; i < int(retries); i++ { - dc, err = getConfig(cfg.ID, cfg.Key, cfg.URL, cfg.SkipTLS, logger) - if err == nil { - break - } - logger.Error(fmt.Sprintf("Fetching bootstrap failed with error: %s", err)) - logger.Debug(fmt.Sprintf("Retries remaining: %d. 
Retrying in %d seconds", retries, retryDelaySec)) - time.Sleep(time.Duration(retryDelaySec) * time.Second) - if i == int(retries)-1 { - logger.Warn("Retries exhausted") - logger.Info("Continuing with local config") - return nil - } - } - - if len(dc.MainfluxChannels) < 2 { - return agent.ErrMalformedEntity - } - - ctrlChan := dc.MainfluxChannels[0].ID - dataChan := dc.MainfluxChannels[1].ID - if dc.MainfluxChannels[0].Metadata["type"] == "data" { - ctrlChan = dc.MainfluxChannels[1].ID - dataChan = dc.MainfluxChannels[0].ID - } - - sc := dc.SvcsConf.Agent.Server - cc := agent.ChanConfig{ - Control: ctrlChan, - Data: dataChan, - } - ec := dc.SvcsConf.Agent.Edgex - lc := dc.SvcsConf.Agent.Log - - mc := dc.SvcsConf.Agent.MQTT - mc.Password = dc.MainfluxKey - mc.Username = dc.MainfluxID - mc.ClientCert = dc.ClientCert - mc.ClientKey = dc.ClientKey - mc.CaCert = dc.CaCert - - hc := dc.SvcsConf.Agent.Heartbeat - tc := dc.SvcsConf.Agent.Terminal - c := agent.NewConfig(sc, cc, ec, lc, mc, hc, tc, file) - - dc.SvcsConf.Export = fillExportConfig(dc.SvcsConf.Export, c) - - saveExportConfig(dc.SvcsConf.Export, logger) - - return agent.SaveConfig(c) -} - -// if export config isnt filled use agent configs. 
-func fillExportConfig(econf export.Config, c agent.Config) export.Config { - if econf.MQTT.Username == "" { - econf.MQTT.Username = c.MQTT.Username - } - if econf.MQTT.Password == "" { - econf.MQTT.Password = c.MQTT.Password - } - if econf.MQTT.ClientCert == "" { - econf.MQTT.ClientCert = c.MQTT.ClientCert - } - if econf.MQTT.ClientCertKey == "" { - econf.MQTT.ClientCertKey = c.MQTT.ClientKey - } - if econf.MQTT.ClientCertPath == "" { - econf.MQTT.ClientCertPath = c.MQTT.CertPath - } - if econf.MQTT.ClientPrivKeyPath == "" { - econf.MQTT.ClientPrivKeyPath = c.MQTT.PrivKeyPath - } - for i, route := range econf.Routes { - if route.MqttTopic == "" { - econf.Routes[i].MqttTopic = "channels/" + c.Channels.Data + "/messages" - } - } - return econf -} - -func saveExportConfig(econf export.Config, logger log.Logger) { - if econf.File == "" { - econf.File = exportConfigFile - } - exConfFileExist := false - if _, err := os.Stat(econf.File); err == nil { - exConfFileExist = true - logger.Info(fmt.Sprintf("Export config file %s exists", econf.File)) - } - if !exConfFileExist { - logger.Info(fmt.Sprintf("Saving export config file %s", econf.File)) - if err := export.Save(econf); err != nil { - logger.Warn(fmt.Sprintf("Failed to save export config file %s", err)) - } - } -} - -func getConfig(bsID, bsKey, bsSvrURL string, skipTLS bool, logger log.Logger) (deviceConfig, error) { - // Get the SystemCertPool, continue with an empty pool on error. - rootCAs, err := x509.SystemCertPool() - if err != nil { - logger.Error(err.Error()) - } - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - // Trust the augmented cert pool in our client. 
- config := &tls.Config{ - InsecureSkipVerify: skipTLS, - RootCAs: rootCAs, - } - tr := &http.Transport{TLSClientConfig: config} - client := &http.Client{Transport: tr} - url := fmt.Sprintf("%s/%s", bsSvrURL, bsID) - - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return deviceConfig{}, err - } - - req.Header.Add("Authorization", fmt.Sprintf("Thing %s", bsKey)) - resp, err := client.Do(req) - if err != nil { - return deviceConfig{}, err - } - if resp.StatusCode >= http.StatusBadRequest { - return deviceConfig{}, errors.New(http.StatusText(resp.StatusCode)) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return deviceConfig{}, err - } - defer resp.Body.Close() - dc := deviceConfig{} - h := ConfigContent{} - if err := json.Unmarshal([]byte(body), &h); err != nil { - return deviceConfig{}, err - } - fmt.Println(h.Content) - sc := ServicesConfig{} - if err := json.Unmarshal([]byte(h.Content), &sc); err != nil { - return deviceConfig{}, err - } - if err := json.Unmarshal([]byte(body), &dc); err != nil { - return deviceConfig{}, err - } - dc.SvcsConf = sc - return dc, nil -} diff --git a/vendor/github.com/mainflux/agent/pkg/edgex/client.go b/vendor/github.com/mainflux/agent/pkg/edgex/client.go deleted file mode 100644 index 03ec4ec1..00000000 --- a/vendor/github.com/mainflux/agent/pkg/edgex/client.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package edgex - -import ( - "bytes" - "encoding/json" - "io" - "net/http" - "strings" - - log "github.com/mainflux/mainflux/logger" - - model "github.com/edgexfoundry/go-mod-core-contracts/models" -) - -type Client interface { - - // PushOperation - pushes operation to EdgeX components. - PushOperation([]string) (string, error) - - // FetchConfig - fetches config from EdgeX components. - FetchConfig([]string) (string, error) - - // FetchMetrics - fetches metrics from EdgeX components. 
- FetchMetrics(cmdArr []string) (string, error) - - // Ping - ping EdgeX SMA. - Ping() (string, error) -} - -type edgexClient struct { - url string - logger log.Logger -} - -// NewClient - Creates ne EdgeX client. -func NewClient(edgexURL string, logger log.Logger) Client { - return &edgexClient{ - url: edgexURL, - logger: logger, - } -} - -// PushOperation - pushes operation to EdgeX components. -func (ec *edgexClient) PushOperation(cmdArr []string) (string, error) { - url := ec.url + "operation" - - m := model.Operation{ - Action: cmdArr[0], - Services: cmdArr[1:], - } - data, err := json.Marshal(m) - if err != nil { - return "", err - } - - resp, err := http.Post(url, "application/json", bytes.NewReader(data)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - return string(body), nil -} - -// FetchConfig - fetches config from EdgeX components. -func (ec *edgexClient) FetchConfig(cmdArr []string) (string, error) { - cmdStr := strings.ReplaceAll(strings.Join(cmdArr, ","), " ", "") - url := ec.url + "config/" + cmdStr - - resp, err := http.Get(url) - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - return string(body), nil -} - -// FetchMetrics - fetches metrics from EdgeX components. -func (ec *edgexClient) FetchMetrics(cmdArr []string) (string, error) { - cmdStr := strings.ReplaceAll(strings.Join(cmdArr, ","), " ", "") - url := ec.url + "metrics/" + cmdStr - - resp, err := http.Get(url) - if err != nil { - - return "", err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - return string(body), nil -} - -// Ping - ping EdgeX SMA. 
-func (ec *edgexClient) Ping() (string, error) { - url := ec.url + "ping" - - resp, err := http.Get(url) - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return string(body), nil -} diff --git a/vendor/github.com/mainflux/agent/pkg/encoder/encoder.go b/vendor/github.com/mainflux/agent/pkg/encoder/encoder.go deleted file mode 100644 index a08be46e..00000000 --- a/vendor/github.com/mainflux/agent/pkg/encoder/encoder.go +++ /dev/null @@ -1,26 +0,0 @@ -package encoder - -import ( - "time" - - "github.com/mainflux/senml" -) - -func EncodeSenML(bn, n, sv string) ([]byte, error) { - ts := float64(time.Now().UnixNano()) / float64(time.Second) - s := senml.Pack{ - Records: []senml.Record{ - senml.Record{ - BaseName: bn, - Name: n, - Time: ts, - StringValue: &sv, - }, - }, - } - payload, err := senml.Encode(s, senml.JSON) - if err != nil { - return nil, err - } - return payload, nil -} diff --git a/vendor/github.com/mainflux/agent/pkg/terminal/terminal.go b/vendor/github.com/mainflux/agent/pkg/terminal/terminal.go deleted file mode 100644 index 6987a3ea..00000000 --- a/vendor/github.com/mainflux/agent/pkg/terminal/terminal.go +++ /dev/null @@ -1,126 +0,0 @@ -package terminal - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "sync" - "time" - - "github.com/creack/pty" - - "github.com/mainflux/agent/pkg/encoder" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" -) - -const ( - terminal = "term" - second = time.Duration(1 * time.Second) -) - -type term struct { - uuid string - ptmx *os.File - done chan bool - topic string - timeout time.Duration - resetTimeout time.Duration - timer *time.Ticker - publish func(channel, payload string) error - logger logger.Logger - mu sync.Mutex -} - -type Session interface { - Send(p []byte) error - IsDone() chan bool - io.Writer -} - -func NewSession(uuid string, timeout time.Duration, publish 
func(channel, payload string) error, logger logger.Logger) (Session, error) { - t := &term{ - logger: logger, - uuid: uuid, - publish: publish, - timeout: timeout, - resetTimeout: timeout, - topic: fmt.Sprintf("term/%s", uuid), - done: make(chan bool), - } - - c := exec.Command("bash") - ptmx, err := pty.Start(c) - if err != nil { - return t, errors.New(err.Error()) - } - t.ptmx = ptmx - - // Copy output to mqtt - go func() { - n, err := io.Copy(t, t.ptmx) - if err != nil { - t.logger.Error(fmt.Sprintf("Error sending data: %s", err)) - } - t.logger.Debug(fmt.Sprintf("Data being sent: %d", n)) - }() - - t.timer = time.NewTicker(1 * time.Second) - - go func() { - for range t.timer.C { - t.decrementCounter() - } - t.logger.Debug("exiting timer routine") - }() - - return t, nil -} - -func (t *term) resetCounter(timeout time.Duration) { - t.mu.Lock() - defer t.mu.Unlock() - if timeout > 0 { - t.timeout = timeout - return - } -} -func (t *term) decrementCounter() { - t.mu.Lock() - defer t.mu.Unlock() - t.timeout -= second - if t.timeout == 0 { - t.done <- true - t.timer.Stop() - } -} - -func (t *term) IsDone() chan bool { - return t.done -} - -func (t *term) Write(p []byte) (int, error) { - t.resetCounter(t.resetTimeout) - n := len(p) - payload, err := encoder.EncodeSenML(t.uuid, terminal, string(p)) - if err != nil { - return n, err - } - - if err := t.publish(t.topic, string(payload)); err != nil { - return n, err - } - return n, nil -} - -func (t *term) Send(p []byte) error { - in := bytes.NewReader(p) - nr, err := io.Copy(t.ptmx, in) - t.logger.Debug(fmt.Sprintf("Written to ptmx: %d", nr)) - if err != nil { - return errors.New(err.Error()) - } - return nil -} diff --git a/vendor/github.com/mainflux/export/pkg/config/config.go b/vendor/github.com/mainflux/export/pkg/config/config.go deleted file mode 100644 index 5ff7f0f1..00000000 --- a/vendor/github.com/mainflux/export/pkg/config/config.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) Mainflux -// 
SPDX-License-Identifier: Apache-2.0 - -// Package writers contain the domaSavein concept definitions needed to -// support Mainflux writer services functionality. -package config - -import ( - "crypto/tls" - "encoding/json" - "os" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/pelletier/go-toml" -) - -const ( - dfltFile = "config.toml" -) - -var ( - errReadConfigFile = errors.New("Error reading config file") - errWritingConfigFile = errors.New("Error writing config file") - errUnmarshalConfigContent = errors.New("Error unmarshaling config file conent") -) - -type MQTT struct { - Host string `json:"host" toml:"host" mapstructure:"host"` - Username string `json:"username" toml:"username" mapstructure:"username"` - Password string `json:"password" toml:"password" mapstructure:"password"` - MTLS bool `json:"mtls" toml:"mtls" mapstructure:"mtls"` - SkipTLSVer bool `json:"skip_tls_ver" toml:"skip_tls_ver" mapstructure:"skip_tls_ver"` - Retain bool `json:"retain" toml:"retain" mapstructure:"retain"` - QoS int `json:"qos" toml:"qos" mapstructure:"qos"` - CAPath string `json:"ca_path" toml:"ca_path" mapstructure:"ca_path"` - ClientCertPath string `json:"client_cert_path" toml:"client_cert_path" mapstructure:"client_cert_path"` - ClientPrivKeyPath string `json:"client_priv_key_path" toml:"client_priv_key_path" mapstructure:"client_priv_key_path"` - ClientCert string `json:"client_cert" toml:"client_cert" mapstructure:"client_cert"` - ClientCertKey string `json:"client_cert_key" toml:"client_cert_key" mapstructure:"client_cert_key"` - CA []byte `json:"-" toml:"-"` - TLSCert tls.Certificate `json:"-" toml:"-"` -} - -type Server struct { - NatsURL string `json:"nats" toml:"nats" mapstructure:"nats"` - LogLevel string `json:"log_level" toml:"log_level" mapstructure:"log_level"` - Port string `json:"port" toml:"port" mapstructure:"port"` - CacheURL string `json:"cache_url" toml:"cache_url" mapstructure:"port"` - CachePass string `json:"cache_pass" toml:"cache_pass" 
mapstructure:"port"` - CacheDB string `json:"cache_db" toml:"cache_db" mapstructure:"port"` -} - -type Config struct { - Server Server `json:"exp" toml:"exp" mapstructure:"exp"` - Routes []Route `json:"routes" toml:"routes" mapstructure:"routes"` - MQTT MQTT `json:"mqtt" toml:"mqtt" mapstructure:"mqtt"` - File string `json:"file"` -} - -type Route struct { - MqttTopic string `json:"mqtt_topic" toml:"mqtt_topic" mapstructure:"mqtt_topic"` - NatsTopic string `json:"nats_topic" toml:"nats_topic" mapstructure:"nats_topic"` - SubTopic string `json:"subtopic" toml:"subtopic" mapstructure:"subtopic"` - Type string `json:"type" toml:"type" mapstructure:"type"` - Workers int `json:"workers" toml:"workers" mapstructure:"workers"` -} - -// Save - store config in a file. -func Save(c Config) error { - b, err := toml.Marshal(c) - if err != nil { - return errors.Wrap(errReadConfigFile, err) - } - file := dfltFile - if c.File != "" { - file = c.File - } - if err := os.WriteFile(file, b, 0644); err != nil { - return errors.Wrap(errWritingConfigFile, err) - } - - return nil -} - -// ReadFile - retrieve config from a file. -func ReadFile(file string) (Config, error) { - c := Config{} - data, err := os.ReadFile(file) - if err != nil { - return c, errors.Wrap(errReadConfigFile, err) - } - if err := toml.Unmarshal(data, &c); err != nil { - return c, errors.Wrap(errUnmarshalConfigContent, err) - } - c.File = file - return c, nil -} - -// ReadBytes - read config from a bytes. 
-func ReadBytes(data []byte) (Config, error) { - c := Config{} - e := toml.Unmarshal(data, &c) - if e == nil { - return c, nil - } - err := errors.Wrap(errUnmarshalConfigContent, e) - if e := json.Unmarshal(data, &c); e != nil { - return c, errors.Wrap(err, e) - } - return c, nil -} diff --git a/vendor/github.com/mainflux/mainflux/LICENSE b/vendor/github.com/mainflux/mainflux/LICENSE deleted file mode 100644 index a7503008..00000000 --- a/vendor/github.com/mainflux/mainflux/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2020 Mainflux - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/mainflux/mainflux/bootstrap/README.md b/vendor/github.com/mainflux/mainflux/bootstrap/README.md deleted file mode 100644 index 27ac2113..00000000 --- a/vendor/github.com/mainflux/mainflux/bootstrap/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# BOOTSTRAP SERVICE - -New devices need to be configured properly and connected to the Mainflux. Bootstrap service is used in order to accomplish that. This service provides the following features: - -1. Creating new Mainflux Things -2. Providing basic configuration for the newly created Things -3. Enabling/disabling Things - -Pre-provisioning a new Thing is as simple as sending Configuration data to the Bootstrap service. Once the Thing is online, it sends a request for initial config to Bootstrap service. Bootstrap service provides an API for enabling and disabling Things. Only enabled Things can exchange messages over Mainflux. Bootstrapping does not implicitly enable Things, it has to be done manually. - -In order to bootstrap successfully, the Thing needs to send bootstrapping request to the specific URL, as well as a secret key. This key and URL are pre-provisioned during the manufacturing process. If the Thing is provisioned on the Bootstrap service side, the corresponding configuration will be sent as a response. Otherwise, the Thing will be saved so that it can be provisioned later. - -## Thing Configuration Entity - -Thing Configuration consists of two logical parts: the custom configuration that can be interpreted by the Thing itself and Mainflux-related configuration. Mainflux config contains: - -1. corresponding Mainflux Thing ID -2. corresponding Mainflux Thing key -3. list of the Mainflux channels the Thing is connected to - -> Note: list of channels contains IDs of the Mainflux channels. These channels are _pre-provisioned_ on the Mainflux side and, unlike corresponding Mainflux Thing, Bootstrap service is not able to create Mainflux Channels. 
- -Enabling and disabling Thing (adding Thing to/from whitelist) is as simple as connecting corresponding Mainflux Thing to the given list of Channels. Configuration keeps _state_ of the Thing: - -| State | What it means | -| -------- | ------------------------------------------- | -| Inactive | Thing is created, but isn't enabled | -| Active | Thing is able to communicate using Mainflux | - -Switching between states `Active` and `Inactive` enables and disables Thing, respectively. - -Thing configuration also contains the so-called `external ID` and `external key`. An external ID is a unique identifier of corresponding Thing. For example, a device MAC address is a good choice for external ID. External key is a secret key that is used for authentication during the bootstrapping procedure. - -## Configuration - -The service is configured using the environment variables presented in the following table. Note that any unset variables will be replaced with their default values. - -| Variable | Description | Default | -| ----------------------------- | ----------------------------------------------------------------------- | -------------------------------------------------- | -| MF_BOOTSTRAP_LOG_LEVEL | Log level for Bootstrap (debug, info, warn, error) | info | -| MF_BOOTSTRAP_DB_HOST | Database host address | localhost | -| MF_BOOTSTRAP_DB_PORT | Database host port | 5432 | -| MF_BOOTSTRAP_DB_USER | Database user | mainflux | -| MF_BOOTSTRAP_DB_PASS | Database password | mainflux | -| MF_BOOTSTRAP_DB_NAME | Name of the database used by the service | bootstrap | -| MF_BOOTSTRAP_DB_SSL_MODE | Database connection SSL mode (disable, require, verify-ca, verify-full) | disable | -| MF_BOOTSTRAP_DB_SSL_CERT | Path to the PEM encoded certificate file | | -| MF_BOOTSTRAP_DB_SSL_KEY | Path to the PEM encoded key file | | -| MF_BOOTSTRAP_DB_SSL_ROOT_CERT | Path to the PEM encoded root certificate file | | -| MF_BOOTSTRAP_ENCRYPT_KEY | Secret key for secure bootstrapping 
encryption | v7aT0HGxJxt2gULzr3RHwf4WIf6DusPphG5Ftm2bNCWD8mTpyr | -| MF_BOOTSTRAP_HTTP_HOST | Bootstrap service HTTP host | | -| MF_BOOTSTRAP_HTTP_PORT | Bootstrap service HTTP port | 9013 | -| MF_BOOTSTRAP_HTTP_SERVER_CERT | Path to server certificate in pem format | | -| MF_BOOTSTRAP_HTTP_SERVER_KEY | Path to server key in pem format | | -| MF_BOOTSTRAP_EVENT_CONSUMER | Bootstrap service event source consumer name | bootstrap | -| MF_BOOTSTRAP_ES_URL | Bootstrap service event source URL | localhost:6379 | -| MF_BOOTSTRAP_ES_PASS | Bootstrap service event source password | | -| MF_BOOTSTRAP_ES_DB | Bootstrap service event source database | 0 | -| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | -| MF_AUTH_GRPC_CLIENT_TLS | Enable TLS for gRPC client | false | -| MF_AUTH_GRPC_CA_CERTS | CA certificates for gRPC client | | -| MF_THINGS_URL | Base url for Mainflux Things | http://localhost:9000 | -| MF_JAEGER_URL | Jaeger server URL | http://jaeger:14268/api/traces | -| MF_SEND_TELEMETRY | Send telemetry to mainflux call home server | true | -| MF_BOOTSTRAP_INSTANCE_ID | Bootstrap service instance ID | | - -## Deployment - -The service itself is distributed as Docker container. Check the [`boostrap`](https://github.com/mainflux/mainflux/blob/master/docker/addons/bootstrap/docker-compose.yml#L32-L56) service section in -docker-compose to see how service is deployed. 
- -To start the service outside of the container, execute the following shell script: - -```bash -# download the latest version of the service -git clone https://github.com/mainflux/mainflux - -cd mainflux - -# compile the service -make bootstrap - -# copy binary to bin -make install - -# set the environment variables and run the service -MF_BOOTSTRAP_LOG_LEVEL=[Bootstrap log level] \ -MF_BOOTSTRAP_ENCRYPT_KEY=[Hex-encoded encryption key used for secure bootstrap] \ -MF_BOOTSTRAP_EVENT_CONSUMER=[Bootstrap service event source consumer name] \ -MF_BOOTSTRAP_ES_URL=[Bootstrap service event source URL] \ -MF_BOOTSTRAP_ES_PASS=[Bootstrap service event source password] \ -MF_BOOTSTRAP_ES_DB=[Bootstrap service event source database] \ -MF_BOOTSTRAP_HTTP_HOST=[Bootstrap service HTTP host] \ -MF_BOOTSTRAP_HTTP_PORT=[Bootstrap service HTTP port] \ -MF_BOOTSTRAP_HTTP_SERVER_CERT=[Path to HTTP server certificate in pem format] \ -MF_BOOTSTRAP_HTTP_SERVER_KEY=[Path to HTTP server key in pem format] \ -MF_BOOTSTRAP_DB_HOST=[Database host address] \ -MF_BOOTSTRAP_DB_PORT=[Database host port] \ -MF_BOOTSTRAP_DB_USER=[Database user] \ -MF_BOOTSTRAP_DB_PASS=[Database password] \ -MF_BOOTSTRAP_DB_NAME=[Name of the database used by the service] \ -MF_BOOTSTRAP_DB_SSL_MODE=[SSL mode to connect to the database with] \ -MF_BOOTSTRAP_DB_SSL_CERT=[Path to the PEM encoded certificate file] \ -MF_BOOTSTRAP_DB_SSL_KEY=[Path to the PEM encoded key file] \ -MF_BOOTSTRAP_DB_SSL_ROOT_CERT=[Path to the PEM encoded root certificate file] \ -MF_AUTH_GRPC_URL=[Users service gRPC URL] \ -MF_AUTH_GRPC_TIMEOUT=[Users service gRPC request timeout in seconds] \ -MF_AUTH_GRPC_CLIENT_TLS=[Boolean value to enable/disable client TLS] \ -MF_AUTH_GRPC_CA_CERT=[Path to trusted CAs in PEM format] \ -MF_THINGS_URL=[Base url for Mainflux Things] \ -MF_JAEGER_URL=[Jaeger server URL] \ -MF_SEND_TELEMETRY=[Send telemetry to mainflux call home server] \ -MF_BOOTSTRAP_INSTANCE_ID=[Bootstrap instance ID] \ 
-$GOBIN/mainflux-bootstrap -``` - -Setting `MF_BOOTSTRAP_CA_CERTS` expects a file in PEM format of trusted CAs. This will enable TLS against the Users gRPC endpoint trusting only those CAs that are provided. - -## Usage - -For more information about service capabilities and its usage, please check out -the [API documentation](https://api.mainflux.io/?urls.primaryName=bootstrap-openapi.yml). - -[doc]: https://docs.mainflux.io diff --git a/vendor/github.com/mainflux/mainflux/bootstrap/configs.go b/vendor/github.com/mainflux/mainflux/bootstrap/configs.go deleted file mode 100644 index 9f5cd70c..00000000 --- a/vendor/github.com/mainflux/mainflux/bootstrap/configs.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap - -import ( - "context" - "time" - - "github.com/mainflux/mainflux/pkg/clients" -) - -// Config represents Configuration entity. It wraps information about external entity -// as well as info about corresponding Mainflux entities. -// MFThing represents corresponding Mainflux Thing ID. -// MFKey is key of corresponding Mainflux Thing. -// MFChannels is a list of Mainflux Channels corresponding Mainflux Thing connects to. -type Config struct { - ThingID string `json:"thing_id"` - Owner string `json:"owner,omitempty"` - Name string `json:"name,omitempty"` - ClientCert string `json:"client_cert,omitempty"` - ClientKey string `json:"client_key,omitempty"` - CACert string `json:"ca_cert,omitempty"` - ThingKey string `json:"thing_key"` - Channels []Channel `json:"channels,omitempty"` - ExternalID string `json:"external_id"` - ExternalKey string `json:"external_key"` - Content string `json:"content,omitempty"` - State State `json:"state"` -} - -// Channel represents Mainflux channel corresponding Mainflux Thing is connected to. 
-type Channel struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - Owner string `json:"owner_id"` - Parent string `json:"parent_id,omitempty"` - Description string `json:"description,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - UpdatedBy string `json:"updated_by,omitempty"` - Status clients.Status `json:"status"` -} - -// Filter is used for the search filters. -type Filter struct { - FullMatch map[string]string - PartialMatch map[string]string -} - -// ConfigsPage contains page related metadata as well as list of Configs that -// belong to this page. -type ConfigsPage struct { - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` - Configs []Config `json:"configs"` -} - -// ConfigRepository specifies a Config persistence API. -type ConfigRepository interface { - // Save persists the Config. Successful operation is indicated by non-nil - // error response. - Save(ctx context.Context, cfg Config, chsConnIDs []string) (string, error) - - // RetrieveByID retrieves the Config having the provided identifier, that is owned - // by the specified user. - RetrieveByID(ctx context.Context, owner, id string) (Config, error) - - // RetrieveAll retrieves a subset of Configs that are owned - // by the specific user, with given filter parameters. - RetrieveAll(ctx context.Context, owner string, filter Filter, offset, limit uint64) ConfigsPage - - // RetrieveByExternalID returns Config for given external ID. - RetrieveByExternalID(ctx context.Context, externalID string) (Config, error) - - // Update updates an existing Config. A non-nil error is returned - // to indicate operation failure. - Update(ctx context.Context, cfg Config) error - - // UpdateCerts updates and returns an existing Config certificate and owner. - // A non-nil error is returned to indicate operation failure. 
- UpdateCert(ctx context.Context, owner, thingID, clientCert, clientKey, caCert string) (Config, error) - - // UpdateConnections updates a list of Channels the Config is connected to - // adding new Channels if needed. - UpdateConnections(ctx context.Context, owner, id string, channels []Channel, connections []string) error - - // Remove removes the Config having the provided identifier, that is owned - // by the specified user. - Remove(ctx context.Context, owner, id string) error - - // ChangeState changes of the Config, that is owned by the specific user. - ChangeState(ctx context.Context, owner, id string, state State) error - - // ListExisting retrieves those channels from the given list that exist in DB. - ListExisting(ctx context.Context, owner string, ids []string) ([]Channel, error) - - // Methods RemoveThing, UpdateChannel, and RemoveChannel are related to - // event sourcing. That's why these methods surpass ownership check. - - // RemoveThing removes Config of the Thing with the given ID. - RemoveThing(ctx context.Context, id string) error - - // UpdateChannel updates channel with the given ID. - UpdateChannel(ctx context.Context, c Channel) error - - // RemoveChannel removes channel with the given ID. - RemoveChannel(ctx context.Context, id string) error - - // DisconnectHandler changes state of the Config when the corresponding Thing is - // disconnected from the Channel. - DisconnectThing(ctx context.Context, channelID, thingID string) error -} diff --git a/vendor/github.com/mainflux/mainflux/bootstrap/doc.go b/vendor/github.com/mainflux/mainflux/bootstrap/doc.go deleted file mode 100644 index 266f62d1..00000000 --- a/vendor/github.com/mainflux/mainflux/bootstrap/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package bootstrap contains the domain concept definitions needed to support -// Mainflux bootstrap service functionality. 
-package bootstrap diff --git a/vendor/github.com/mainflux/mainflux/bootstrap/reader.go b/vendor/github.com/mainflux/mainflux/bootstrap/reader.go deleted file mode 100644 index afcff466..00000000 --- a/vendor/github.com/mainflux/mainflux/bootstrap/reader.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/json" - "io" - "net/http" -) - -// bootstrapRes represent Mainflux Response to the Bootatrap request. -// This is used as a response from ConfigReader and can easily be -// replace with any other response format. -type bootstrapRes struct { - ThingID string `json:"thing_id"` - ThingKey string `json:"thing_key"` - Channels []channelRes `json:"channels"` - Content string `json:"content,omitempty"` - ClientCert string `json:"client_cert,omitempty"` - ClientKey string `json:"client_key,omitempty"` - CACert string `json:"ca_cert,omitempty"` -} - -type channelRes struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Metadata interface{} `json:"metadata,omitempty"` -} - -func (res bootstrapRes) Code() int { - return http.StatusOK -} - -func (res bootstrapRes) Headers() map[string]string { - return map[string]string{} -} - -func (res bootstrapRes) Empty() bool { - return false -} - -type reader struct { - encKey []byte -} - -// NewConfigReader return new reader which is used to generate response -// from the config. 
-func NewConfigReader(encKey []byte) ConfigReader { - return reader{encKey: encKey} -} - -func (r reader) ReadConfig(cfg Config, secure bool) (interface{}, error) { - var channels []channelRes - for _, ch := range cfg.Channels { - channels = append(channels, channelRes{ID: ch.ID, Name: ch.Name, Metadata: ch.Metadata}) - } - - res := bootstrapRes{ - ThingKey: cfg.ThingKey, - ThingID: cfg.ThingID, - Channels: channels, - Content: cfg.Content, - ClientCert: cfg.ClientCert, - ClientKey: cfg.ClientKey, - CACert: cfg.CACert, - } - if secure { - b, err := json.Marshal(res) - if err != nil { - return nil, err - } - return r.encrypt(b) - } - - return res, nil -} - -func (r reader) encrypt(in []byte) ([]byte, error) { - block, err := aes.NewCipher(r.encKey) - if err != nil { - return nil, err - } - ciphertext := make([]byte, aes.BlockSize+len(in)) - iv := ciphertext[:aes.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, err - } - stream := cipher.NewCFBEncrypter(block, iv) - stream.XORKeyStream(ciphertext[aes.BlockSize:], in) - return ciphertext, nil -} diff --git a/vendor/github.com/mainflux/mainflux/bootstrap/service.go b/vendor/github.com/mainflux/mainflux/bootstrap/service.go deleted file mode 100644 index be88f99d..00000000 --- a/vendor/github.com/mainflux/mainflux/bootstrap/service.go +++ /dev/null @@ -1,486 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "encoding/hex" - "time" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" -) - -var ( - // ErrThings indicates failure to communicate with Mainflux Things service. - // It can be due to networking error or invalid/unauthenticated request. 
- ErrThings = errors.New("failed to receive response from Things service") - - // ErrExternalKey indicates a non-existent bootstrap configuration for given external key. - ErrExternalKey = errors.New("failed to get bootstrap configuration for given external key") - - // ErrExternalKeySecure indicates error in getting bootstrap configuration for given encrypted external key. - ErrExternalKeySecure = errors.New("failed to get bootstrap configuration for given encrypted external key") - - // ErrBootstrap indicates error in getting bootstrap configuration. - ErrBootstrap = errors.New("failed to read bootstrap configuration") - - errAddBootstrap = errors.New("failed to add bootstrap configuration") - errUpdateConnections = errors.New("failed to update connections") - errRemoveBootstrap = errors.New("failed to remove bootstrap configuration") - errChangeState = errors.New("failed to change state of bootstrap configuration") - errUpdateChannel = errors.New("failed to update channel") - errRemoveConfig = errors.New("failed to remove bootstrap configuration") - errRemoveChannel = errors.New("failed to remove channel") - errCreateThing = errors.New("failed to create thing") - errDisconnectThing = errors.New("failed to disconnect thing") - errCheckChannels = errors.New("failed to check if channels exists") - errConnectionChannels = errors.New("failed to check channels connections") - errThingNotFound = errors.New("failed to find thing") - errUpdateCert = errors.New("failed to update cert") -) - -var _ Service = (*bootstrapService)(nil) - -// Service specifies an API that must be fulfilled by the domain service -// implementation, and all of its decorators (e.g. logging & metrics). -type Service interface { - // Add adds new Thing Config to the user identified by the provided token. - Add(ctx context.Context, token string, cfg Config) (Config, error) - - // View returns Thing Config with given ID belonging to the user identified by the given token. 
- View(ctx context.Context, token, id string) (Config, error) - - // Update updates editable fields of the provided Config. - Update(ctx context.Context, token string, cfg Config) error - - // UpdateCert updates an existing Config certificate and token. - // A non-nil error is returned to indicate operation failure. - UpdateCert(ctx context.Context, token, thingID, clientCert, clientKey, caCert string) (Config, error) - - // UpdateConnections updates list of Channels related to given Config. - UpdateConnections(ctx context.Context, token, id string, connections []string) error - - // List returns subset of Configs with given search params that belong to the - // user identified by the given token. - List(ctx context.Context, token string, filter Filter, offset, limit uint64) (ConfigsPage, error) - - // Remove removes Config with specified token that belongs to the user identified by the given token. - Remove(ctx context.Context, token, id string) error - - // Bootstrap returns Config to the Thing with provided external ID using external key. - Bootstrap(ctx context.Context, externalKey, externalID string, secure bool) (Config, error) - - // ChangeState changes state of the Thing with given ID and owner. - ChangeState(ctx context.Context, token, id string, state State) error - - // Methods RemoveConfig, UpdateChannel, and RemoveChannel are used as - // handlers for events. That's why these methods surpass ownership check. - - // UpdateChannelHandler updates Channel with data received from an event. - UpdateChannelHandler(ctx context.Context, channel Channel) error - - // RemoveConfigHandler removes Configuration with id received from an event. - RemoveConfigHandler(ctx context.Context, id string) error - - // RemoveChannelHandler removes Channel with id received from an event. - RemoveChannelHandler(ctx context.Context, id string) error - - // DisconnectHandler changes state of the Config when connect/disconnect event occurs. 
- DisconnectThingHandler(ctx context.Context, channelID, thingID string) error -} - -// ConfigReader is used to parse Config into format which will be encoded -// as a JSON and consumed from the client side. The purpose of this interface -// is to provide convenient way to generate custom configuration response -// based on the specific Config which will be consumed by the client. -type ConfigReader interface { - ReadConfig(Config, bool) (interface{}, error) -} - -type bootstrapService struct { - auth mainflux.AuthServiceClient - configs ConfigRepository - sdk mfsdk.SDK - encKey []byte -} - -// New returns new Bootstrap service. -func New(auth mainflux.AuthServiceClient, configs ConfigRepository, sdk mfsdk.SDK, encKey []byte) Service { - return &bootstrapService{ - configs: configs, - sdk: sdk, - auth: auth, - encKey: encKey, - } -} - -func (bs bootstrapService) Add(ctx context.Context, token string, cfg Config) (Config, error) { - owner, err := bs.identify(ctx, token) - if err != nil { - return Config{}, err - } - - toConnect := bs.toIDList(cfg.Channels) - - // Check if channels exist. This is the way to prevent fetching channels that already exist. 
- existing, err := bs.configs.ListExisting(ctx, owner, toConnect) - if err != nil { - return Config{}, errors.Wrap(errCheckChannels, err) - } - - cfg.Channels, err = bs.connectionChannels(toConnect, bs.toIDList(existing), token) - - if err != nil { - return Config{}, errors.Wrap(errConnectionChannels, err) - } - - id := cfg.ThingID - mfThing, err := bs.thing(id, token) - if err != nil { - return Config{}, errors.Wrap(errThingNotFound, err) - } - - cfg.ThingID = mfThing.ID - cfg.Owner = owner - cfg.State = Inactive - cfg.ThingKey = mfThing.Credentials.Secret - - saved, err := bs.configs.Save(ctx, cfg, toConnect) - if err != nil { - if id == "" { - if _, errT := bs.sdk.DisableThing(cfg.ThingID, token); errT != nil { - err = errors.Wrap(err, errT) - } - } - return Config{}, errors.Wrap(errAddBootstrap, err) - } - - cfg.ThingID = saved - cfg.Channels = append(cfg.Channels, existing...) - - return cfg, nil -} - -func (bs bootstrapService) View(ctx context.Context, token, id string) (Config, error) { - owner, err := bs.identify(ctx, token) - if err != nil { - return Config{}, err - } - - return bs.configs.RetrieveByID(ctx, owner, id) -} - -func (bs bootstrapService) Update(ctx context.Context, token string, cfg Config) error { - owner, err := bs.identify(ctx, token) - if err != nil { - return err - } - - cfg.Owner = owner - - return bs.configs.Update(ctx, cfg) -} - -func (bs bootstrapService) UpdateCert(ctx context.Context, token, thingID, clientCert, clientKey, caCert string) (Config, error) { - owner, err := bs.identify(ctx, token) - if err != nil { - return Config{}, err - } - cfg, err := bs.configs.UpdateCert(ctx, owner, thingID, clientCert, clientKey, caCert) - if err != nil { - return Config{}, errors.Wrap(errUpdateCert, err) - } - return cfg, nil -} - -func (bs bootstrapService) UpdateConnections(ctx context.Context, token, id string, connections []string) error { - owner, err := bs.identify(ctx, token) - if err != nil { - return err - } - - cfg, err := 
bs.configs.RetrieveByID(ctx, owner, id) - if err != nil { - return errors.Wrap(errUpdateConnections, err) - } - - add, remove := bs.updateList(cfg, connections) - - // Check if channels exist. This is the way to prevent fetching channels that already exist. - existing, err := bs.configs.ListExisting(ctx, owner, connections) - if err != nil { - return errors.Wrap(errUpdateConnections, err) - } - - channels, err := bs.connectionChannels(connections, bs.toIDList(existing), token) - if err != nil { - return errors.Wrap(errUpdateConnections, err) - } - - cfg.Channels = channels - var connect, disconnect []string - - if cfg.State == Active { - connect = add - disconnect = remove - } - - for _, c := range disconnect { - if err := bs.sdk.DisconnectThing(id, c, token); err != nil { - if errors.Contains(err, errors.ErrNotFound) { - continue - } - return ErrThings - } - } - - for _, c := range connect { - conIDs := mfsdk.Connection{ - ChannelID: c, - ThingID: id, - } - if err := bs.sdk.Connect(conIDs, token); err != nil { - return ErrThings - } - } - - return bs.configs.UpdateConnections(ctx, owner, id, channels, connections) -} - -func (bs bootstrapService) List(ctx context.Context, token string, filter Filter, offset, limit uint64) (ConfigsPage, error) { - owner, err := bs.identify(ctx, token) - if err != nil { - return ConfigsPage{}, err - } - - return bs.configs.RetrieveAll(ctx, owner, filter, offset, limit), nil -} - -func (bs bootstrapService) Remove(ctx context.Context, token, id string) error { - owner, err := bs.identify(ctx, token) - if err != nil { - return err - } - if err := bs.configs.Remove(ctx, owner, id); err != nil { - return errors.Wrap(errRemoveBootstrap, err) - } - return nil -} - -func (bs bootstrapService) Bootstrap(ctx context.Context, externalKey, externalID string, secure bool) (Config, error) { - cfg, err := bs.configs.RetrieveByExternalID(ctx, externalID) - if err != nil { - return cfg, errors.Wrap(ErrBootstrap, err) - } - if secure { - dec, err := 
bs.dec(externalKey) - if err != nil { - return Config{}, errors.Wrap(ErrExternalKeySecure, err) - } - externalKey = dec - } - if cfg.ExternalKey != externalKey { - return Config{}, ErrExternalKey - } - - return cfg, nil -} - -func (bs bootstrapService) ChangeState(ctx context.Context, token, id string, state State) error { - owner, err := bs.identify(ctx, token) - if err != nil { - return err - } - - cfg, err := bs.configs.RetrieveByID(ctx, owner, id) - if err != nil { - return errors.Wrap(errChangeState, err) - } - - if cfg.State == state { - return nil - } - - switch state { - case Active: - for _, c := range cfg.Channels { - conIDs := mfsdk.Connection{ - ChannelID: c.ID, - ThingID: cfg.ThingID, - } - if err := bs.sdk.Connect(conIDs, token); err != nil { - return ErrThings - } - } - case Inactive: - for _, c := range cfg.Channels { - if err := bs.sdk.DisconnectThing(cfg.ThingID, c.ID, token); err != nil { - if errors.Contains(err, errors.ErrNotFound) { - continue - } - return ErrThings - } - } - } - if err := bs.configs.ChangeState(ctx, owner, id, state); err != nil { - return errors.Wrap(errChangeState, err) - } - return nil -} - -func (bs bootstrapService) UpdateChannelHandler(ctx context.Context, channel Channel) error { - if err := bs.configs.UpdateChannel(ctx, channel); err != nil { - return errors.Wrap(errUpdateChannel, err) - } - return nil -} - -func (bs bootstrapService) RemoveConfigHandler(ctx context.Context, id string) error { - if err := bs.configs.RemoveThing(ctx, id); err != nil { - return errors.Wrap(errRemoveConfig, err) - } - return nil -} - -func (bs bootstrapService) RemoveChannelHandler(ctx context.Context, id string) error { - if err := bs.configs.RemoveChannel(ctx, id); err != nil { - return errors.Wrap(errRemoveChannel, err) - } - return nil -} - -func (bs bootstrapService) DisconnectThingHandler(ctx context.Context, channelID, thingID string) error { - if err := bs.configs.DisconnectThing(ctx, channelID, thingID); err != nil { - return 
errors.Wrap(errDisconnectThing, err) - } - return nil -} - -func (bs bootstrapService) identify(ctx context.Context, token string) (string, error) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - - res, err := bs.auth.Identify(ctx, &mainflux.IdentityReq{Token: token}) - if err != nil { - return "", errors.ErrAuthentication - } - - return res.GetId(), nil -} - -// Method thing retrieves Mainflux Thing creating one if an empty ID is passed. -func (bs bootstrapService) thing(id, token string) (mfsdk.Thing, error) { - var thing mfsdk.Thing - var err error - var sdkErr errors.SDKError - - thing.ID = id - if id == "" { - thing, sdkErr = bs.sdk.CreateThing(mfsdk.Thing{}, token) - if err != nil { - return mfsdk.Thing{}, errors.Wrap(errCreateThing, errors.New(sdkErr.Err().Msg())) - } - } - - thing, sdkErr = bs.sdk.Thing(thing.ID, token) - if sdkErr != nil { - err = errors.New(sdkErr.Error()) - if id != "" { - if _, sdkErr2 := bs.sdk.DisableThing(thing.ID, token); sdkErr2 != nil { - err = errors.Wrap(errors.New(sdkErr.Msg()), errors.New(sdkErr2.Msg())) - } - } - return mfsdk.Thing{}, errors.Wrap(ErrThings, err) - } - - return thing, nil -} - -func (bs bootstrapService) connectionChannels(channels, existing []string, token string) ([]Channel, error) { - add := make(map[string]bool, len(channels)) - for _, ch := range channels { - add[ch] = true - } - - for _, ch := range existing { - if add[ch] { - delete(add, ch) - } - } - - var ret []Channel - for id := range add { - ch, err := bs.sdk.Channel(id, token) - if err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - ret = append(ret, Channel{ - ID: ch.ID, - Name: ch.Name, - Metadata: ch.Metadata, - }) - } - - return ret, nil -} - -// Method updateList accepts config and channel IDs and returns three lists: -// 1) IDs of Channels to be added -// 2) IDs of Channels to be removed -// 3) IDs of common Channels for these two configs. 
-func (bs bootstrapService) updateList(cfg Config, connections []string) (add, remove []string) { - disconnect := make(map[string]bool, len(cfg.Channels)) - for _, c := range cfg.Channels { - disconnect[c.ID] = true - } - - for _, c := range connections { - if disconnect[c] { - // Don't disconnect common elements. - delete(disconnect, c) - continue - } - // Connect new elements. - add = append(add, c) - } - - for v := range disconnect { - remove = append(remove, v) - } - - return -} - -func (bs bootstrapService) toIDList(channels []Channel) []string { - var ret []string - for _, ch := range channels { - ret = append(ret, ch.ID) - } - - return ret -} - -func (bs bootstrapService) dec(in string) (string, error) { - ciphertext, err := hex.DecodeString(in) - if err != nil { - return "", err - } - block, err := aes.NewCipher(bs.encKey) - if err != nil { - return "", err - } - if len(ciphertext) < aes.BlockSize { - return "", err - } - iv := ciphertext[:aes.BlockSize] - ciphertext = ciphertext[aes.BlockSize:] - stream := cipher.NewCFBDecrypter(block, iv) - stream.XORKeyStream(ciphertext, ciphertext) - return string(ciphertext), nil -} diff --git a/vendor/github.com/mainflux/mainflux/bootstrap/state.go b/vendor/github.com/mainflux/mainflux/bootstrap/state.go deleted file mode 100644 index 3920b895..00000000 --- a/vendor/github.com/mainflux/mainflux/bootstrap/state.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap - -import "strconv" - -const ( - // Inactive Thing is created, but not able to exchange messages using Mainflux. - Inactive State = iota - // Active Thing is created, configured, and whitelisted. - Active -) - -// State represents corresponding Mainflux Thing state. 
The possible Config States -// as well as description of what that State represents are given in the table: -// | State | What it means | -// |----------+--------------------------------------------------------------------------------| -// | Inactive | Thing is created, but isn't able to communicate over Mainflux | -// | Active | Thing is able to communicate using Mainflux |. -type State int - -// String returns string representation of State. -func (s State) String() string { - return strconv.Itoa(int(s)) -} diff --git a/vendor/github.com/mainflux/mainflux/doc.go b/vendor/github.com/mainflux/mainflux/doc.go deleted file mode 100644 index 5f6d6cc9..00000000 --- a/vendor/github.com/mainflux/mainflux/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package mainflux acts as an umbrella package containing multiple different -// microservices and defines all shared domain concepts. -package mainflux diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/clients.go b/vendor/github.com/mainflux/mainflux/pkg/clients/clients.go deleted file mode 100644 index 9b5aa53e..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/clients.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package clients - -import ( - "context" - "fmt" - "regexp" - "strings" - "time" - - "github.com/mainflux/mainflux/pkg/errors" - "golang.org/x/net/idna" -) - -const ( - maxLocalLen = 64 - maxDomainLen = 255 - maxTLDLen = 24 // longest TLD currently in existence - - atSeparator = "@" - dotSeparator = "." 
-) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile(`^[^\s]+\.[^\s]+$`) - userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") -) - -// Credentials represent client credentials: its -// "identity" which can be a username, email, generated name; -// and "secret" which can be a password or access token. -type Credentials struct { - Identity string `json:"identity,omitempty"` // username or generated login ID - Secret string `json:"secret,omitempty"` // password or token -} - -// Client represents generic Client. -type Client struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Tags []string `json:"tags,omitempty"` - Owner string `json:"owner,omitempty"` // nullable - Credentials Credentials `json:"credentials,omitempty"` - Metadata Metadata `json:"metadata,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - UpdatedBy string `json:"updated_by,omitempty"` - Status Status `json:"status"` // 1 for enabled, 0 for disabled - Role Role `json:"role,omitempty"` // 1 for admin, 0 for normal user -} - -// ClientsPage contains page related metadata as well as list -// of Clients that belong to the page. -type ClientsPage struct { - Page - Clients []Client -} - -// MembersPage contains page related metadata as well as list of members that -// belong to this page. -type MembersPage struct { - Page - Members []Client -} - -// Repository specifies an account persistence API. -type Repository interface { - // RetrieveByID retrieves client by its unique ID. - RetrieveByID(ctx context.Context, id string) (Client, error) - - // RetrieveByIdentity retrieves client by its unique credentials - RetrieveByIdentity(ctx context.Context, identity string) (Client, error) - - // RetrieveAll retrieves all clients. - RetrieveAll(ctx context.Context, pm Page) (ClientsPage, error) - - // RetrieveAllByIDs retrieves for given client IDs . 
- RetrieveAllByIDs(ctx context.Context, pm Page) (ClientsPage, error) - - // Update updates the client name and metadata. - Update(ctx context.Context, client Client) (Client, error) - - // UpdateTags updates the client tags. - UpdateTags(ctx context.Context, client Client) (Client, error) - - // UpdateIdentity updates identity for client with given id. - UpdateIdentity(ctx context.Context, client Client) (Client, error) - - // UpdateSecret updates secret for client with given identity. - UpdateSecret(ctx context.Context, client Client) (Client, error) - - // UpdateOwner updates owner for client with given id. - UpdateOwner(ctx context.Context, client Client) (Client, error) - - // ChangeStatus changes client status to enabled or disabled - ChangeStatus(ctx context.Context, client Client) (Client, error) -} - -// Validate returns an error if client representation is invalid. -func (u Client) Validate() error { - if !isEmail(u.Credentials.Identity) { - return errors.ErrMalformedEntity - } - return nil -} - -func isEmail(email string) bool { - if email == "" { - return false - } - - es := strings.Split(email, atSeparator) - if len(es) != 2 { - return false - } - local, host := es[0], es[1] - - if local == "" || len(local) > maxLocalLen { - return false - } - - hs := strings.Split(host, dotSeparator) - if len(hs) < 2 { - return false - } - domain, ext := hs[0], hs[1] - - // Check subdomain and validate - if len(hs) > 2 { - if domain == "" { - return false - } - - for i := 1; i < len(hs)-1; i++ { - sub := hs[i] - if sub == "" { - return false - } - domain = fmt.Sprintf("%s.%s", domain, sub) - } - - ext = hs[len(hs)-1] - } - - if domain == "" || len(domain) > maxDomainLen { - return false - } - if ext == "" || len(ext) > maxTLDLen { - return false - } - - punyLocal, err := idna.ToASCII(local) - if err != nil { - return false - } - punyHost, err := idna.ToASCII(host) - if err != nil { - return false - } - - if userDotRegexp.MatchString(punyLocal) || 
!userRegexp.MatchString(punyLocal) || !hostRegexp.MatchString(punyHost) { - return false - } - - return true -} diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/doc.go b/vendor/github.com/mainflux/mainflux/pkg/clients/doc.go deleted file mode 100644 index 0c5a1441..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package clients contains the domain concept definitions needed to support -// Mainflux clients functionality. -package clients diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/errors.go b/vendor/github.com/mainflux/mainflux/pkg/clients/errors.go deleted file mode 100644 index d0fceeac..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package clients - -import "errors" - -var ( - // ErrInvalidStatus indicates invalid status. - ErrInvalidStatus = errors.New("invalid client status") - - // ErrEnableClient indicates error in enabling client. - ErrEnableClient = errors.New("failed to enable client") - - // ErrDisableClient indicates error in disabling client. - ErrDisableClient = errors.New("failed to disable client") -) diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/page.go b/vendor/github.com/mainflux/mainflux/pkg/clients/page.go deleted file mode 100644 index 518bb1c5..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/page.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package clients - -// Page contains page metadata that helps navigation. 
-type Page struct { - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` - Name string `json:"name,omitempty"` - Order string `json:"order,omitempty"` - Dir string `json:"dir,omitempty"` - Metadata Metadata `json:"metadata,omitempty"` - Owner string `json:"owner,omitempty"` - Tag string `json:"tag,omitempty"` - Permission string `json:"permission,omitempty"` - Status Status `json:"status,omitempty"` - IDs []string `json:"ids,omitempty"` - Identity string `json:"identity,omitempty"` -} diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/roles.go b/vendor/github.com/mainflux/mainflux/pkg/clients/roles.go deleted file mode 100644 index 9196ec88..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/roles.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package clients - -import ( - "encoding/json" - "strings" - - "github.com/mainflux/mainflux/internal/apiutil" -) - -// Role represents Client role. -type Role uint8 - -// Possible Client role values. -const ( - UserRole Role = iota - AdminRole -) - -// String representation of the possible role values. -const ( - Admin = "admin" - User = "user" -) - -// String converts client role to string literal. -func (cs Role) String() string { - switch cs { - case AdminRole: - return Admin - case UserRole: - return User - default: - return Unknown - } -} - -// ToRole converts string value to a valid Client role. 
-func ToRole(status string) (Role, error) { - switch status { - case "", User: - return UserRole, nil - case Admin: - return AdminRole, nil - } - return Role(0), apiutil.ErrInvalidRole -} - -func (r Role) MarshalJSON() ([]byte, error) { - return json.Marshal(r.String()) -} - -func (r *Role) UnmarshalJSON(data []byte) error { - str := strings.Trim(string(data), "\"") - val, err := ToRole(str) - *r = val - return err -} diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/status.go b/vendor/github.com/mainflux/mainflux/pkg/clients/status.go deleted file mode 100644 index 0dca041a..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/status.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package clients - -import ( - "encoding/json" - "errors" - "strings" - - "github.com/mainflux/mainflux/internal/apiutil" -) - -// Status represents Client status. -type Status uint8 - -// Possible Client status values. -const ( - // EnabledStatus represents enabled Client. - EnabledStatus Status = iota - // DisabledStatus represents disabled Client. - DisabledStatus - - // AllStatus is used for querying purposes to list clients irrespective - // of their status - both enabled and disabled. It is never stored in the - // database as the actual Client status and should always be the largest - // value in this enumeration. - AllStatus -) - -// String representation of the possible status values. -const ( - Disabled = "disabled" - Enabled = "enabled" - All = "all" - Unknown = "unknown" -) - -// ErrStatusAlreadyAssigned indicated that the client or group has already been assigned the status. -var ErrStatusAlreadyAssigned = errors.New("status already assigned") - -// String converts client/group status to string literal. 
-func (s Status) String() string { - switch s { - case DisabledStatus: - return Disabled - case EnabledStatus: - return Enabled - case AllStatus: - return All - default: - return Unknown - } -} - -// ToStatus converts string value to a valid Client/Group status. -func ToStatus(status string) (Status, error) { - switch status { - case "", Enabled: - return EnabledStatus, nil - case Disabled: - return DisabledStatus, nil - case All: - return AllStatus, nil - } - return Status(0), apiutil.ErrInvalidStatus -} - -// Custom Marshaller for Client/Groups. -func (s Status) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) -} - -// Custom Unmarshaler for Client/Groups. -func (s *Status) UnmarshalJSON(data []byte) error { - str := strings.Trim(string(data), "\"") - val, err := ToStatus(str) - *s = val - return err -} diff --git a/vendor/github.com/mainflux/mainflux/pkg/clients/types.go b/vendor/github.com/mainflux/mainflux/pkg/clients/types.go deleted file mode 100644 index e52679a1..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/clients/types.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package clients - -// Metadata represents arbitrary JSON. -type Metadata map[string]interface{} diff --git a/vendor/github.com/mainflux/mainflux/pkg/errors/doc.go b/vendor/github.com/mainflux/mainflux/pkg/errors/doc.go deleted file mode 100644 index 10803813..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/errors/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package errors contains Mainflux errors definitions. 
-package errors diff --git a/vendor/github.com/mainflux/mainflux/pkg/messaging/pubsub.go b/vendor/github.com/mainflux/mainflux/pkg/messaging/pubsub.go deleted file mode 100644 index 7e4a51dd..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/messaging/pubsub.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package messaging - -import "context" - -// Publisher specifies message publishing API. -type Publisher interface { - // Publishes message to the stream. - Publish(ctx context.Context, topic string, msg *Message) error - - // Close gracefully closes message publisher's connection. - Close() error -} - -// MessageHandler represents Message handler for Subscriber. -type MessageHandler interface { - // Handle handles messages passed by underlying implementation. - Handle(msg *Message) error - - // Cancel is used for cleanup during unsubscribing and it's optional. - Cancel() error -} - -// Subscriber specifies message subscription API. -type Subscriber interface { - // Subscribe subscribes to the message stream and consumes messages. - Subscribe(ctx context.Context, id, topic string, handler MessageHandler) error - - // Unsubscribe unsubscribes from the message stream and - // stops consuming messages. - Unsubscribe(ctx context.Context, id, topic string) error - - // Close gracefully closes message subscriber's connection. - Close() error -} - -// PubSub represents aggregation interface for publisher and subscriber. -type PubSub interface { - Publisher - Subscriber -} diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/README.md b/vendor/github.com/mainflux/mainflux/pkg/sdk/go/README.md deleted file mode 100644 index 4924b898..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# Mainflux Go SDK - -Go SDK, a Go driver for Mainflux HTTP API. - -Does both system administration (provisioning) and messaging. 
- -## Installation -Import `"github.com/mainflux/mainflux/sdk/go"` in your Go package. - -``` -import "github.com/mainflux/mainflux/pkg/sdk/go"``` - -Then call SDK Go functions to interact with the system. - -## API Reference - -```go -FUNCTIONS - -func NewMfxSDK(host, port string, tls bool) *MfxSDK - -func (sdk *MfxSDK) Channel(id, token string) (things.Channel, error) - Channel - gets channel by ID - -func (sdk *MfxSDK) Channels(token string) ([]things.Channel, error) - Channels - gets all channels - -func (sdk *MfxSDK) Connect(struct{[]string, []string}, token string) error - Connect - connect things to channels - -func (sdk *MfxSDK) CreateChannel(data, token string) (string, error) - CreateChannel - creates new channel and generates UUID - -func (sdk *MfxSDK) CreateThing(data, token string) (string, error) - CreateThing - creates new thing and generates thing UUID - -func (sdk *MfxSDK) CreateToken(user, pwd string) (string, error) - CreateToken - create user token - -func (sdk *MfxSDK) CreateUser(user, pwd string) error - CreateUser - create user - -func (sdk *MfxSDK) User(pwd string) (user, error) - User - gets user - -func (sdk *MfxSDK) UpdateUser(user, pwd string) error - UpdateUser - update user - -func (sdk *MfxSDK) UpdatePassword(user, pwd string) error - UpdatePassword - update user password - -func (sdk *MfxSDK) DeleteChannel(id, token string) error - DeleteChannel - removes channel - -func (sdk *MfxSDK) DeleteThing(id, token string) error - DeleteThing - removes thing - -func (sdk *MfxSDK) DisconnectThing(thingID, chanID, token string) error - DisconnectThing - connect thing to a channel - -func (sdk mfSDK) SendMessage(chanID, msg, token string) error - SendMessage - send message on Mainflux channel - -func (sdk mfSDK) SetContentType(ct ContentType) error - SetContentType - set message content type. Available options are SenML - JSON, custom JSON and custom binary (octet-stream). 
- -func (sdk mfSDK) Thing(id, token string) (Thing, error) - Thing - gets thing by ID - -func (sdk mfSDK) Things(token string) ([]Thing, error) - Things - gets all things - -func (sdk mfSDK) UpdateChannel(channel Channel, token string) error - UpdateChannel - update a channel - -func (sdk mfSDK) UpdateThing(thing Thing, token string) error - UpdateThing - updates thing by ID - -func (sdk mfSDK) Health() (mainflux.Health, error) - Health - things service health check -``` diff --git a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/doc.go b/vendor/github.com/mainflux/mainflux/pkg/sdk/go/doc.go deleted file mode 100644 index 0ceacb69..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/sdk/go/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package sdk contains Mainflux SDK. -package sdk diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/README.md b/vendor/github.com/mainflux/mainflux/pkg/transformers/README.md deleted file mode 100644 index f30ac5bc..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/transformers/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Message Transformers - -A transformer service consumes events published by Mainflux adapters (such as MQTT and HTTP adapters) and transforms them to an arbitrary message format. A transformer can be imported as a standalone package and used for message transformation on the consumer side. - -Mainflux [SenML transformer](transformer) is an example of Transformer service for SenML messages. - -Mainflux [writers](writers) are using a standalone SenML transformer to preprocess messages before storing them. 
- -[transformers]: https://github.com/mainflux/mainflux/tree/master/transformers/senml -[writers]: https://github.com/mainflux/mainflux/tree/master/writers diff --git a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/README.md b/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/README.md deleted file mode 100644 index d75118cc..00000000 --- a/vendor/github.com/mainflux/mainflux/pkg/transformers/senml/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# SenML Message Transformer - -SenML Transformer provides Message Transformer for SenML messages. -It supports JSON and CBOR content types - To transform Mainflux Message successfully, the payload must be either JSON or CBOR encoded SenML message. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile rename to 
vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go similarity index 83% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go index 258c0636..7c08e564 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go @@ -19,9 +19,10 @@ import ( "errors" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) +// TODO: Give error package name prefix in next minor release. var errInvalidVarint = errors.New("invalid varint32 encountered") // ReadDelimited decodes a message from the provided length-delimited stream, @@ -36,6 +37,12 @@ var errInvalidVarint = errors.New("invalid varint32 encountered") // of the stream has been reached in doing so. In that case, any subsequent // calls return (0, io.EOF). func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify a decode buffer in the + // next major version. + + // TODO: Consider using error wrapping to annotate error state in pass- + // through cases in the next minor version. + // Per AbstractParser#parsePartialDelimitedFrom with // CodedInputStream#readRawVarint32. var headerBuf [binary.MaxVarintLen32]byte @@ -53,15 +60,14 @@ func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { if err != nil { return bytesRead, err } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... + // A Reader should not return (0, nil); but if it does, it should + // be treated as no-op according to the Reader contract. 
continue } bytesRead += newBytesRead // Now present everything read so far to the varint decoder and // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) } messageBuf := make([]byte, messageLength) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go similarity index 91% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go index 8fb59ad2..e58dd9d2 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) // WriteDelimited encodes and dumps a message to the provided writer prefixed @@ -28,6 +28,9 @@ import ( // number of bytes written and any applicable error. This is roughly // equivalent to the companion Java API's MessageLite#writeDelimitedTo. func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify an encode buffer in the + // next major version. 
+ buffer, err := proto.Marshal(m) if err != nil { return 0, err diff --git a/vendor/github.com/nats-io/nats.go/.gitignore b/vendor/github.com/nats-io/nats.go/.gitignore deleted file mode 100644 index ae4871f4..00000000 --- a/vendor/github.com/nats-io/nats.go/.gitignore +++ /dev/null @@ -1,45 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# Emacs -*~ -\#*\# -.\#* - -# vi/vim -.??*.swp - -# Mac -.DS_Store - -# Eclipse -.project -.settings/ - -# bin - -# Goland -.idea - -# VS Code -.vscode \ No newline at end of file diff --git a/vendor/github.com/nats-io/nats.go/.golangci.yaml b/vendor/github.com/nats-io/nats.go/.golangci.yaml deleted file mode 100644 index be66189e..00000000 --- a/vendor/github.com/nats-io/nats.go/.golangci.yaml +++ /dev/null @@ -1,13 +0,0 @@ -issues: - max-issues-per-linter: 0 - max-same-issues: 0 - exclude-rules: - - linters: - - errcheck - text: "Unsubscribe" - - linters: - - errcheck - text: "msg.Ack" - - linters: - - errcheck - text: "watcher.Stop" diff --git a/vendor/github.com/nats-io/nats.go/.travis.yml b/vendor/github.com/nats-io/nats.go/.travis.yml deleted file mode 100644 index 36879705..00000000 --- a/vendor/github.com/nats-io/nats.go/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go -go: -- "1.21.x" -- "1.20.x" -go_import_path: github.com/nats-io/nats.go -install: -- go get -t ./... 
-- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then - go install github.com/mattn/goveralls@latest; - go install github.com/wadey/gocovmerge@latest; - go install honnef.co/go/tools/cmd/staticcheck@latest; - go install github.com/client9/misspell/cmd/misspell@latest; - fi -before_script: -- $(exit $(go fmt ./... | wc -l)) -- go vet -modfile=go_test.mod ./... -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then - find . -type f -name "*.go" | xargs misspell -error -locale US; - GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...; - fi -- golangci-lint run ./jetstream/... -script: -- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... --failfast -vet=off; fi -after_success: -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi - -jobs: - include: - - name: "Go: 1.21.x (nats-server@main)" - go: "1.21.x" - before_script: - - go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main - allow_failures: - - name: "Go: 1.21.x (nats-server@main)" diff --git a/vendor/github.com/nats-io/nats.go/.words b/vendor/github.com/nats-io/nats.go/.words deleted file mode 100644 index 24be7f62..00000000 --- a/vendor/github.com/nats-io/nats.go/.words +++ /dev/null @@ -1,106 +0,0 @@ -1 - -derek -dlc -ivan - -acknowledgement/SM -arity -deduplication/S -demarshal/SDG -durables -iff -observable/S -redelivery/S -retransmitting -retry/SB - -SlowConsumer - -AppendInt -ReadMIMEHeader - -clientProtoZero -jetstream -v1 -v2 - -ack/SGD -auth -authToken -chans -creds -config/S -cseq -impl -msgh -msgId -mux/S -nack -ptr -puback -scanf -stderr -stdout -structs -tm -todo -unsub/S - -permessage -permessage-deflate -urlA -urlB -websocket -ws -wss - -NKey -pList - 
-backend/S -backoff/S -decompressor/CGS -inflight -inlined -lookups -reconnection/MS -redeliver/ADGS -responder/S -rewrap/S -rollup/S -unreceive/DRSZGB -variadic -wakeup/S -whitespace -wrap/AS - -omitempty - -apache -html -ietf -www - -sum256 -32bit/S -64bit/S -64k -128k -512k - -hacky -handroll/D - -rfc6455 -rfc7692 -0x00 -0xff -20x -40x -50x - -ErrXXX - -atlanta -eu diff --git a/vendor/github.com/nats-io/nats.go/.words.readme b/vendor/github.com/nats-io/nats.go/.words.readme deleted file mode 100644 index 9d9f5cbb..00000000 --- a/vendor/github.com/nats-io/nats.go/.words.readme +++ /dev/null @@ -1,25 +0,0 @@ -The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries -but populates the dictionary with identifiers from the Go source. - - - -Alas, no comments are allowed in the .words file and newer versions of gospel -error out on seeing them. This is really a hunspell restriction. - -We assume en_US hunspell dictionaries are installed and used. -The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) -Invoke `hunspell -D` to see the actual locations. - -Words which are in the base dictionary can't have extra affix rules added to -them, so we have to start with the affixed variant we want to add. -Thus `creds` rather than `cred/S` and so on. - -So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, -we have to use unreceive as the stem. - -We can't define our own affix or compound rules, -to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} - -The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... -"permessage-deflate", which is an RFC7692 registered extension for websockets. -We have to explicitly list "permessage". 
diff --git a/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md b/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md deleted file mode 100644 index b850d49e..00000000 --- a/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -## Community Code of Conduct - -NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/github.com/nats-io/nats.go/GOVERNANCE.md b/vendor/github.com/nats-io/nats.go/GOVERNANCE.md deleted file mode 100644 index 1d5a7be3..00000000 --- a/vendor/github.com/nats-io/nats.go/GOVERNANCE.md +++ /dev/null @@ -1,3 +0,0 @@ -# NATS Go Client Governance - -NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nats.go/LICENSE b/vendor/github.com/nats-io/nats.go/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/nats-io/nats.go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/nats-io/nats.go/MAINTAINERS.md b/vendor/github.com/nats-io/nats.go/MAINTAINERS.md deleted file mode 100644 index 23214655..00000000 --- a/vendor/github.com/nats-io/nats.go/MAINTAINERS.md +++ /dev/null @@ -1,8 +0,0 @@ -# Maintainers - -Maintainership is on a per project basis. - -### Maintainers - - Derek Collison [@derekcollison](https://github.com/derekcollison) - - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) - - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/vendor/github.com/nats-io/nats.go/README.md b/vendor/github.com/nats-io/nats.go/README.md deleted file mode 100644 index 108db4e3..00000000 --- a/vendor/github.com/nats-io/nats.go/README.md +++ /dev/null @@ -1,480 +0,0 @@ -# NATS - Go Client -A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). 
- -[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url] - -[License-Url]: https://www.apache.org/licenses/LICENSE-2.0 -[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg -[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go -[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go -[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go -[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main -[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go -[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c -[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main -[Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main - -## Installation - -```bash -# Go client -go get github.com/nats-io/nats.go/ - -# Server -go get github.com/nats-io/nats-server -``` - -When using or transitioning to Go modules support: - -```bash -# Go client latest or explicit version -go get github.com/nats-io/nats.go/@latest -go get github.com/nats-io/nats.go/@v1.30.2 - -# For latest NATS Server, add /v2 at the end -go get github.com/nats-io/nats-server/v2 - -# NATS Server v1 is installed otherwise -# go get github.com/nats-io/nats-server -``` - -## Basic Usage - -```go -import "github.com/nats-io/nats.go" - -// Connect to a server -nc, _ := nats.Connect(nats.DefaultURL) - -// Simple Publisher -nc.Publish("foo", []byte("Hello World")) - -// Simple Async Subscriber -nc.Subscribe("foo", func(m *nats.Msg) { - fmt.Printf("Received a message: %s\n", string(m.Data)) -}) - -// Responding to a request message -nc.Subscribe("request", func(m *nats.Msg) { - m.Respond([]byte("answer is 42")) -}) - -// Simple Sync Subscriber -sub, err := nc.SubscribeSync("foo") -m, err 
:= sub.NextMsg(timeout) - -// Channel Subscriber -ch := make(chan *nats.Msg, 64) -sub, err := nc.ChanSubscribe("foo", ch) -msg := <- ch - -// Unsubscribe -sub.Unsubscribe() - -// Drain -sub.Drain() - -// Requests -msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) - -// Replies -nc.Subscribe("help", func(m *nats.Msg) { - nc.Publish(m.Reply, []byte("I can help!")) -}) - -// Drain connection (Preferred for responders) -// Close() not needed if this is called. -nc.Drain() - -// Close connection -nc.Close() -``` - -## JetStream - -JetStream is the built-in NATS persistence system. `nats.go` provides a built-in -API enabling both managing JetStream assets as well as publishing/consuming -persistent messages. - -### Basic usage - -```go -// connect to nats server -nc, _ := nats.Connect(nats.DefaultURL) - -// create jetstream context from nats connection -js, _ := jetstream.New(nc) - -ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) -defer cancel() - -// get existing stream handle -stream, _ := js.Stream(ctx, "foo") - -// retrieve consumer handle from a stream -cons, _ := stream.Consumer(ctx, "cons") - -// consume messages from the consumer in callback -cc, _ := cons.Consume(func(msg jetstream.Msg) { - fmt.Println("Received jetstream message: ", string(msg.Data())) - msg.Ack() -}) -defer cc.Stop() -``` - -To find more information on `nats.go` JetStream API, visit -[`jetstream/README.md`](jetstream/README.md) - -> The current JetStream API replaces the [legacy JetStream API](legacy_jetstream.md) - -## Service API - -The service API (`micro`) allows you to [easily build NATS services](micro/README.md) The -services API is currently in beta release. 
- -## Encoded Connections - -```go - -nc, _ := nats.Connect(nats.DefaultURL) -c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -defer c.Close() - -// Simple Publisher -c.Publish("foo", "Hello World") - -// Simple Async Subscriber -c.Subscribe("foo", func(s string) { - fmt.Printf("Received a message: %s\n", s) -}) - -// EncodedConn can Publish any raw Go type using the registered Encoder -type person struct { - Name string - Address string - Age int -} - -// Go type Subscriber -c.Subscribe("hello", func(p *person) { - fmt.Printf("Received a person: %+v\n", p) -}) - -me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"} - -// Go type Publisher -c.Publish("hello", me) - -// Unsubscribe -sub, err := c.Subscribe("foo", nil) -// ... -sub.Unsubscribe() - -// Requests -var response string -err = c.Request("help", "help me", &response, 10*time.Millisecond) -if err != nil { - fmt.Printf("Request failed: %v\n", err) -} - -// Replying -c.Subscribe("help", func(subj, reply string, msg string) { - c.Publish(reply, "I can help!") -}) - -// Close connection -c.Close(); -``` - -## New Authentication (Nkeys and User Credentials) -This requires server with version >= 2.0.0 - -NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys. -The simplest form is to use the helper method UserCredentials(credsFilepath). -```go -nc, err := nats.Connect(url, nats.UserCredentials("user.creds")) -``` - -The helper methods creates two callback handlers to present the user JWT and sign the nonce challenge from the server. -The core client library never has direct access to your private key and simply performs the callback for signing the server challenge. -The helper will load and wipe and erase memory it uses for each connect or reconnect. - -The helper also can take two entries, one for the JWT and one for the NKey seed file. 
-```go -nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk")) -``` - -You can also set the callback handlers directly and manage challenge signing directly. -```go -nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB)) -``` - -Bare Nkeys are also supported. The nkey seed should be in a read only file, e.g. seed.txt -```bash -> cat seed.txt -# This is my seed nkey! -SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM -``` - -This is a helper function which will load and decode and do the proper signing for the server nonce. -It will clear memory in between invocations. -You can choose to use the low level option and provide the public key and a signature callback on your own. - -```go -opt, err := nats.NkeyOptionFromSeed("seed.txt") -nc, err := nats.Connect(serverUrl, opt) - -// Direct -nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB)) -``` - -## TLS - -```go -// tls as a scheme will enable secure connections by default. This will also verify the server name. -nc, err := nats.Connect("tls://nats.demo.io:4443") - -// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup. -// We provide a helper method to make this case easier. 
-nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem")) - -// If the server requires client certificate, there is an helper function for that too: -cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem") -nc, err = nats.Connect("tls://localhost:4443", cert) - -// You can also supply a complete tls.Config - -certFile := "./configs/certs/client-cert.pem" -keyFile := "./configs/certs/client-key.pem" -cert, err := tls.LoadX509KeyPair(certFile, keyFile) -if err != nil { - t.Fatalf("error parsing X509 certificate/key pair: %v", err) -} - -config := &tls.Config{ - ServerName: opts.Host, - Certificates: []tls.Certificate{cert}, - RootCAs: pool, - MinVersion: tls.VersionTLS12, -} - -nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config)) -if err != nil { - t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) -} - -``` - -## Using Go Channels (netchan) - -```go -nc, _ := nats.Connect(nats.DefaultURL) -ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -defer ec.Close() - -type person struct { - Name string - Address string - Age int -} - -recvCh := make(chan *person) -ec.BindRecvChan("hello", recvCh) - -sendCh := make(chan *person) -ec.BindSendChan("hello", sendCh) - -me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"} - -// Send via Go channels -sendCh <- me - -// Receive via Go channels -who := <- recvCh -``` - -## Wildcard Subscriptions - -```go - -// "*" matches any token, at any level of the subject. -nc.Subscribe("foo.*.baz", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -nc.Subscribe("foo.bar.*", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -// ">" matches any length of the tail of a subject, and can only be the last token -// E.g. 
'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22' -nc.Subscribe("foo.>", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -// Matches all of the above -nc.Publish("foo.bar.baz", []byte("Hello World")) - -``` - -## Queue Groups - -```go -// All subscriptions with the same queue name will form a queue group. -// Each message will be delivered to only one subscriber per queue group, -// using queuing semantics. You can have as many queue groups as you wish. -// Normal subscribers will continue to work as expected. - -nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) { - received += 1; -}) -``` - -## Advanced Usage - -```go - -// Normally, the library will return an error when trying to connect and -// there is no server running. The RetryOnFailedConnect option will set -// the connection in reconnecting state if it failed to connect right away. -nc, err := nats.Connect(nats.DefaultURL, - nats.RetryOnFailedConnect(true), - nats.MaxReconnects(10), - nats.ReconnectWait(time.Second), - nats.ReconnectHandler(func(_ *nats.Conn) { - // Note that this will be invoked for the first asynchronous connect. - })) -if err != nil { - // Should not return an error even if it can't connect, but you still - // need to check in case there are some configuration errors. -} - -// Flush connection to server, returns when all messages have been processed. -nc.Flush() -fmt.Println("All clear!") - -// FlushTimeout specifies a timeout value as well. 
-err := nc.FlushTimeout(1*time.Second) -if err != nil { - fmt.Println("All clear!") -} else { - fmt.Println("Flushed timed out!") -} - -// Auto-unsubscribe after MAX_WANTED messages received -const MAX_WANTED = 10 -sub, err := nc.Subscribe("foo") -sub.AutoUnsubscribe(MAX_WANTED) - -// Multiple connections -nc1 := nats.Connect("nats://host1:4222") -nc2 := nats.Connect("nats://host2:4222") - -nc1.Subscribe("foo", func(m *Msg) { - fmt.Printf("Received a message: %s\n", string(m.Data)) -}) - -nc2.Publish("foo", []byte("Hello World!")); - -``` - -## Clustered Usage - -```go - -var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224" - -nc, err := nats.Connect(servers) - -// Optionally set ReconnectWait and MaxReconnect attempts. -// This example means 10 seconds total per backend. -nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second)) - -// You can also add some jitter for the reconnection. -// This call will add up to 500 milliseconds for non TLS connections and 2 seconds for TLS connections. -// If not specified, the library defaults to 100 milliseconds and 1 second, respectively. -nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second)) - -// You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried -// all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter. -// The library will pass the number of times it went through the whole list. -nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration { - return someBackoffFunction(attempts) -})) - -// Optionally disable randomization of the server pool -nc, err = nats.Connect(servers, nats.DontRandomize()) - -// Setup callbacks to be notified on disconnects, reconnects and connection closed. 
-nc, err = nats.Connect(servers, - nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { - fmt.Printf("Got disconnected! Reason: %q\n", err) - }), - nats.ReconnectHandler(func(nc *nats.Conn) { - fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl()) - }), - nats.ClosedHandler(func(nc *nats.Conn) { - fmt.Printf("Connection closed. Reason: %q\n", nc.LastError()) - }) -) - -// When connecting to a mesh of servers with auto-discovery capabilities, -// you may need to provide a username/password or token in order to connect -// to any server in that mesh when authentication is required. -// Instead of providing the credentials in the initial URL, you will use -// new option setters: -nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar")) - -// For token based authentication: -nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken")) - -// You can even pass the two at the same time in case one of the server -// in the mesh requires token instead of user name and password. -nc, err = nats.Connect("nats://localhost:4222", - nats.UserInfo("foo", "bar"), - nats.Token("S3cretT0ken")) - -// Note that if credentials are specified in the initial URLs, they take -// precedence on the credentials specified through the options. -// For instance, in the connect call below, the client library will use -// the user "my" and password "pwd" to connect to localhost:4222, however, -// it will use username "foo" and password "bar" when (re)connecting to -// a different server URL that it got as part of the auto-discovery. 
-nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar")) - -``` - -## Context support (+Go 1.7) - -```go -ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) -defer cancel() - -nc, err := nats.Connect(nats.DefaultURL) - -// Request with context -msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar")) - -// Synchronous subscriber with context -sub, err := nc.SubscribeSync("foo") -msg, err := sub.NextMsgWithContext(ctx) - -// Encoded Request with context -c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -type request struct { - Message string `json:"message"` -} -type response struct { - Code int `json:"code"` -} -req := &request{Message: "Hello"} -resp := &response{} -err := c.RequestWithContext(ctx, "foo", req, resp) -``` - -## License - -Unless otherwise noted, the NATS source files are distributed -under the Apache Version 2.0 license found in the LICENSE file. - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large) diff --git a/vendor/github.com/nats-io/nats.go/context.go b/vendor/github.com/nats-io/nats.go/context.go deleted file mode 100644 index c4ef4be1..00000000 --- a/vendor/github.com/nats-io/nats.go/context.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2016-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nats - -import ( - "context" - "reflect" -) - -// RequestMsgWithContext takes a context, a subject and payload -// in bytes and request expecting a single response. -func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) { - if msg == nil { - return nil, ErrInvalidMsg - } - hdr, err := msg.headerBytes() - if err != nil { - return nil, err - } - return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data) -} - -// RequestWithContext takes a context, a subject and payload -// in bytes and request expecting a single response. -func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { - return nc.requestWithContext(ctx, subj, nil, data) -} - -func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { - if ctx == nil { - return nil, ErrInvalidContext - } - if nc == nil { - return nil, ErrInvalidConnection - } - // Check whether the context is done already before making - // the request. - if ctx.Err() != nil { - return nil, ctx.Err() - } - - var m *Msg - var err error - - // If user wants the old style. - if nc.useOldRequestStyle() { - m, err = nc.oldRequestWithContext(ctx, subj, hdr, data) - } else { - mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) - if err != nil { - return nil, err - } - - var ok bool - - select { - case m, ok = <-mch: - if !ok { - return nil, ErrConnectionClosed - } - case <-ctx.Done(): - nc.mu.Lock() - delete(nc.respMap, token) - nc.mu.Unlock() - return nil, ctx.Err() - } - } - // Check for no responder status. - if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - m, err = nil, ErrNoResponders - } - return m, err -} - -// oldRequestWithContext utilizes inbox and subscription per request. 
-func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { - inbox := nc.NewInbox() - ch := make(chan *Msg, RequestChanLen) - - s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) - if err != nil { - return nil, err - } - s.AutoUnsubscribe(1) - defer s.Unsubscribe() - - err = nc.publish(subj, inbox, hdr, data) - if err != nil { - return nil, err - } - - return s.NextMsgWithContext(ctx) -} - -func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) { - if ctx == nil { - return nil, ErrInvalidContext - } - if s == nil { - return nil, ErrBadSubscription - } - if ctx.Err() != nil { - return nil, ctx.Err() - } - - s.mu.Lock() - err := s.validateNextMsgState(pullSubInternal) - if err != nil { - s.mu.Unlock() - return nil, err - } - - // snapshot - mch := s.mch - s.mu.Unlock() - - var ok bool - var msg *Msg - - // If something is available right away, let's optimize that case. - select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } - return msg, nil - default: - // If internal and we don't want to wait, signal that there is no - // message in the internal queue. - if pullSubInternal && !waitIfNoMsg { - return nil, errNoMessages - } - } - - select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } - case <-ctx.Done(): - return nil, ctx.Err() - } - - return msg, nil -} - -// NextMsgWithContext takes a context and returns the next message -// available to a synchronous subscriber, blocking until it is delivered -// or context gets canceled. -func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { - return s.nextMsgWithContext(ctx, false, true) -} - -// FlushWithContext will allow a context to control the duration -// of a Flush() call. 
This context should be non-nil and should -// have a deadline set. We will return an error if none is present. -func (nc *Conn) FlushWithContext(ctx context.Context) error { - if nc == nil { - return ErrInvalidConnection - } - if ctx == nil { - return ErrInvalidContext - } - _, ok := ctx.Deadline() - if !ok { - return ErrNoDeadlineContext - } - - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - // Create a buffered channel to prevent chan send to block - // in processPong() - ch := make(chan struct{}, 1) - nc.sendPing(ch) - nc.mu.Unlock() - - var err error - - select { - case _, ok := <-ch: - if !ok { - err = ErrConnectionClosed - } else { - close(ch) - } - case <-ctx.Done(): - err = ctx.Err() - } - - if err != nil { - nc.removeFlushEntry(ch) - } - - return err -} - -// RequestWithContext will create an Inbox and perform a Request -// using the provided cancellation context with the Inbox reply -// for the data v. A response will be decoded into the vPtr last parameter. -func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error { - if ctx == nil { - return ErrInvalidContext - } - - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - m, err := c.Conn.RequestWithContext(ctx, subject, b) - if err != nil { - return err - } - if reflect.TypeOf(vPtr) == emptyMsgType { - mPtr := vPtr.(*Msg) - *mPtr = *m - } else { - err := c.Enc.Decode(m.Subject, m.Data, vPtr) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/nats-io/nats.go/dependencies.md b/vendor/github.com/nats-io/nats.go/dependencies.md deleted file mode 100644 index cc986b27..00000000 --- a/vendor/github.com/nats-io/nats.go/dependencies.md +++ /dev/null @@ -1,13 +0,0 @@ -# External Dependencies - -This file lists the dependencies used in this repository. 
- -| Dependency | License | -|-|-| -| Go | BSD 3-Clause "New" or "Revised" License | -| github.com/nats-io/nats.go | Apache License 2.0 | -| github.com/golang/protobuf v1.4.2 | BSD 3-Clause "New" or "Revised" License | -| github.com/nats-io/nats-server/v2 v2.1.8-0.20201115145023-f61fa8529a0f | Apache License 2.0 | -| github.com/nats-io/nkeys v0.2.0 | Apache License 2.0 | -| github.com/nats-io/nuid v1.0.1 | Apache License 2.0 | -| google.golang.org/protobuf v1.23.0 | BSD 3-Clause License | diff --git a/vendor/github.com/nats-io/nats.go/enc.go b/vendor/github.com/nats-io/nats.go/enc.go deleted file mode 100644 index a1c54f24..00000000 --- a/vendor/github.com/nats-io/nats.go/enc.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2012-2019 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "errors" - "fmt" - "reflect" - "sync" - "time" - - // Default Encoders - "github.com/nats-io/nats.go/encoders/builtin" -) - -// Encoder interface is for all register encoders -type Encoder interface { - Encode(subject string, v any) ([]byte, error) - Decode(subject string, data []byte, vPtr any) error -} - -var encMap map[string]Encoder -var encLock sync.Mutex - -// Indexed names into the Registered Encoders. 
-const ( - JSON_ENCODER = "json" - GOB_ENCODER = "gob" - DEFAULT_ENCODER = "default" -) - -func init() { - encMap = make(map[string]Encoder) - // Register json, gob and default encoder - RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{}) - RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{}) - RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{}) -} - -// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to -// a nats server and have an extendable encoder system that will encode and decode messages -// from raw Go types. -type EncodedConn struct { - Conn *Conn - Enc Encoder -} - -// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered -// encoder. -func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { - if c == nil { - return nil, errors.New("nats: Nil Connection") - } - if c.IsClosed() { - return nil, ErrConnectionClosed - } - ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} - if ec.Enc == nil { - return nil, fmt.Errorf("no encoder registered for '%s'", encType) - } - return ec, nil -} - -// RegisterEncoder will register the encType with the given Encoder. Useful for customization. -func RegisterEncoder(encType string, enc Encoder) { - encLock.Lock() - defer encLock.Unlock() - encMap[encType] = enc -} - -// EncoderForType will return the registered Encoder for the encType. -func EncoderForType(encType string) Encoder { - encLock.Lock() - defer encLock.Unlock() - return encMap[encType] -} - -// Publish publishes the data argument to the given subject. The data argument -// will be encoded using the associated encoder. -func (c *EncodedConn) Publish(subject string, v any) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - return c.Conn.publish(subject, _EMPTY_, nil, b) -} - -// PublishRequest will perform a Publish() expecting a response on the -// reply subject. Use Request() for automatically waiting for a response -// inline. 
-func (c *EncodedConn) PublishRequest(subject, reply string, v any) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - return c.Conn.publish(subject, reply, nil, b) -} - -// Request will create an Inbox and perform a Request() call -// with the Inbox reply for the data v. A response will be -// decoded into the vPtr Response. -func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - m, err := c.Conn.Request(subject, b, timeout) - if err != nil { - return err - } - if reflect.TypeOf(vPtr) == emptyMsgType { - mPtr := vPtr.(*Msg) - *mPtr = *m - } else { - err = c.Enc.Decode(m.Subject, m.Data, vPtr) - } - return err -} - -// Handler is a specific callback used for Subscribe. It is generalized to -// an any, but we will discover its format and arguments at runtime -// and perform the correct callback, including demarshaling encoded data -// back into the appropriate struct based on the signature of the Handler. -// -// Handlers are expected to have one of four signatures. -// -// type person struct { -// Name string `json:"name,omitempty"` -// Age uint `json:"age,omitempty"` -// } -// -// handler := func(m *Msg) -// handler := func(p *person) -// handler := func(subject string, o *obj) -// handler := func(subject, reply string, o *obj) -// -// These forms allow a callback to request a raw Msg ptr, where the processing -// of the message from the wire is untouched. Process a JSON representation -// and demarshal it into the given struct, e.g. person. -// There are also variants where the callback wants either the subject, or the -// subject and the reply subject. 
-type Handler any - -// Dissect the cb Handler's signature -func argInfo(cb Handler) (reflect.Type, int) { - cbType := reflect.TypeOf(cb) - if cbType.Kind() != reflect.Func { - panic("nats: Handler needs to be a func") - } - numArgs := cbType.NumIn() - if numArgs == 0 { - return nil, numArgs - } - return cbType.In(numArgs - 1), numArgs -} - -var emptyMsgType = reflect.TypeOf(&Msg{}) - -// Subscribe will create a subscription on the given subject and process incoming -// messages using the specified Handler. The Handler should be a func that matches -// a signature from the description of Handler from above. -func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { - return c.subscribe(subject, _EMPTY_, cb) -} - -// QueueSubscribe will create a queue subscription on the given subject and process -// incoming messages using the specified Handler. The Handler should be a func that -// matches a signature from the description of Handler from above. -func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { - return c.subscribe(subject, queue, cb) -} - -// Internal implementation that all public functions will use. 
-func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { - if cb == nil { - return nil, errors.New("nats: Handler required for EncodedConn Subscription") - } - argType, numArgs := argInfo(cb) - if argType == nil { - return nil, errors.New("nats: Handler requires at least one argument") - } - - cbValue := reflect.ValueOf(cb) - wantsRaw := (argType == emptyMsgType) - - natsCB := func(m *Msg) { - var oV []reflect.Value - if wantsRaw { - oV = []reflect.Value{reflect.ValueOf(m)} - } else { - var oPtr reflect.Value - if argType.Kind() != reflect.Ptr { - oPtr = reflect.New(argType) - } else { - oPtr = reflect.New(argType.Elem()) - } - if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { - if c.Conn.Opts.AsyncErrorCB != nil { - c.Conn.ach.push(func() { - c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) - }) - } - return - } - if argType.Kind() != reflect.Ptr { - oPtr = reflect.Indirect(oPtr) - } - - // Callback Arity - switch numArgs { - case 1: - oV = []reflect.Value{oPtr} - case 2: - subV := reflect.ValueOf(m.Subject) - oV = []reflect.Value{subV, oPtr} - case 3: - subV := reflect.ValueOf(m.Subject) - replyV := reflect.ValueOf(m.Reply) - oV = []reflect.Value{subV, replyV, oPtr} - } - - } - cbValue.Call(oV) - } - - return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil) -} - -// FlushTimeout allows a Flush operation to have an associated timeout. -func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { - return c.Conn.FlushTimeout(timeout) -} - -// Flush will perform a round trip to the server and return when it -// receives the internal reply. -func (c *EncodedConn) Flush() error { - return c.Conn.Flush() -} - -// Close will close the connection to the server. This call will release -// all blocking calls, such as Flush(), etc. -func (c *EncodedConn) Close() { - c.Conn.Close() -} - -// Drain will put a connection into a drain state. 
All subscriptions will -// immediately be put into a drain state. Upon completion, the publishers -// will be drained and can not publish any additional messages. Upon draining -// of the publishers, the connection will be closed. Use the ClosedCB() -// option to know when the connection has moved from draining to closed. -func (c *EncodedConn) Drain() error { - return c.Conn.Drain() -} - -// LastError reports the last error encountered via the Connection. -func (c *EncodedConn) LastError() error { - return c.Conn.LastError() -} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go deleted file mode 100644 index 65c2d68b..00000000 --- a/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtin - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "unsafe" -) - -// DefaultEncoder implementation for EncodedConn. -// This encoder will leave []byte and string untouched, but will attempt to -// turn numbers into appropriate strings that can be decoded. It will also -// propely encoded and decode bools. If will encode a struct, but if you want -// to properly handle structures you should use JsonEncoder. 
-type DefaultEncoder struct { - // Empty -} - -var trueB = []byte("true") -var falseB = []byte("false") -var nilB = []byte("") - -// Encode -func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) { - switch arg := v.(type) { - case string: - bytes := *(*[]byte)(unsafe.Pointer(&arg)) - return bytes, nil - case []byte: - return arg, nil - case bool: - if arg { - return trueB, nil - } else { - return falseB, nil - } - case nil: - return nilB, nil - default: - var buf bytes.Buffer - fmt.Fprintf(&buf, "%+v", arg) - return buf.Bytes(), nil - } -} - -// Decode -func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error { - // Figure out what it's pointing to... - sData := *(*string)(unsafe.Pointer(&data)) - switch arg := vPtr.(type) { - case *string: - *arg = sData - return nil - case *[]byte: - *arg = data - return nil - case *int: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int(n) - return nil - case *int32: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int32(n) - return nil - case *int64: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int64(n) - return nil - case *float32: - n, err := strconv.ParseFloat(sData, 32) - if err != nil { - return err - } - *arg = float32(n) - return nil - case *float64: - n, err := strconv.ParseFloat(sData, 64) - if err != nil { - return err - } - *arg = float64(n) - return nil - case *bool: - b, err := strconv.ParseBool(sData) - if err != nil { - return err - } - *arg = b - return nil - default: - vt := reflect.TypeOf(arg).Elem() - return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) - } -} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go deleted file mode 100644 index 4e7cecba..00000000 --- a/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go +++ /dev/null @@ 
-1,45 +0,0 @@ -// Copyright 2013-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtin - -import ( - "bytes" - "encoding/gob" -) - -// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. -// This encoder will use the builtin encoding/gob to Marshal -// and Unmarshal most types, including structs. -type GobEncoder struct { - // Empty -} - -// FIXME(dlc) - This could probably be more efficient. - -// Encode -func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) { - b := new(bytes.Buffer) - enc := gob.NewEncoder(b) - if err := enc.Encode(v); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// Decode -func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) { - dec := gob.NewDecoder(bytes.NewBuffer(data)) - err = dec.Decode(vPtr) - return -} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go deleted file mode 100644 index 9b6ffc01..00000000 --- a/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtin - -import ( - "encoding/json" - "strings" -) - -// JsonEncoder is a JSON Encoder implementation for EncodedConn. -// This encoder will use the builtin encoding/json to Marshal -// and Unmarshal most types, including structs. -type JsonEncoder struct { - // Empty -} - -// Encode -func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - return b, nil -} - -// Decode -func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) { - switch arg := vPtr.(type) { - case *string: - // If they want a string and it is a JSON string, strip quotes - // This allows someone to send a struct but receive as a plain string - // This cast should be efficient for Go 1.3 and beyond. 
- str := string(data) - if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { - *arg = str[1 : len(str)-1] - } else { - *arg = str - } - case *[]byte: - *arg = data - default: - err = json.Unmarshal(data, arg) - } - return -} diff --git a/vendor/github.com/nats-io/nats.go/go_test.mod b/vendor/github.com/nats-io/nats.go/go_test.mod deleted file mode 100644 index 8902c1ed..00000000 --- a/vendor/github.com/nats-io/nats.go/go_test.mod +++ /dev/null @@ -1,22 +0,0 @@ -module github.com/nats-io/nats.go - -go 1.19 - -require ( - github.com/golang/protobuf v1.4.2 - github.com/klauspost/compress v1.17.0 - github.com/nats-io/nats-server/v2 v2.10.0 - github.com/nats-io/nkeys v0.4.5 - github.com/nats-io/nuid v1.0.1 - go.uber.org/goleak v1.2.1 - golang.org/x/text v0.13.0 - google.golang.org/protobuf v1.23.0 -) - -require ( - github.com/minio/highwayhash v1.0.2 // indirect - github.com/nats-io/jwt/v2 v2.5.2 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/time v0.3.0 // indirect -) diff --git a/vendor/github.com/nats-io/nats.go/go_test.sum b/vendor/github.com/nats-io/nats.go/go_test.sum deleted file mode 100644 index ce4ba920..00000000 --- a/vendor/github.com/nats-io/nats.go/go_test.sum +++ /dev/null @@ -1,48 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= 
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/nats-io/jwt/v2 v2.5.2 h1:DhGH+nKt+wIkDxM6qnVSKjokq5t59AZV5HRcFW0zJwU= -github.com/nats-io/jwt/v2 v2.5.2/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= -github.com/nats-io/nats-server/v2 v2.10.0 h1:rcU++Hzo+wARxtJugrV3J5z5iGdHeVG8tT8Chb3bKDg= -github.com/nats-io/nats-server/v2 v2.10.0/go.mod h1:3PMvMSu2cuK0J9YInRLWdFpFsswKKGUS77zVSAudRto= -github.com/nats-io/nkeys v0.4.5 h1:Zdz2BUlFm4fJlierwvGK+yl20IAKUm7eV6AAZXEhkPk= -github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= -github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/exp 
v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/vendor/github.com/nats-io/nats.go/internal/parser/parse.go b/vendor/github.com/nats-io/nats.go/internal/parser/parse.go deleted file mode 100644 index 7eab8add..00000000 --- a/vendor/github.com/nats-io/nats.go/internal/parser/parse.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2020-2022 The 
NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "errors" - "fmt" -) - -const ( - AckDomainTokenPos = iota + 2 - AckAccHashTokenPos - AckStreamTokenPos - AckConsumerTokenPos - AckNumDeliveredTokenPos - AckStreamSeqTokenPos - AckConsumerSeqTokenPos - AckTimestampSeqTokenPos - AckNumPendingTokenPos -) - -var ErrInvalidSubjectFormat = errors.New("invalid format of ACK subject") - -// Quick parser for positive numbers in ack reply encoding. -// NOTE: This parser does not detect uint64 overflow -func ParseNum(d string) (n uint64) { - if len(d) == 0 { - return 0 - } - - // ASCII numbers 0-9 - const ( - asciiZero = 48 - asciiNine = 57 - ) - - for _, dec := range d { - if dec < asciiZero || dec > asciiNine { - return 0 - } - n = n*10 + uint64(dec) - asciiZero - } - return -} - -func GetMetadataFields(subject string) ([]string, error) { - v1TokenCounts, v2TokenCounts := 9, 12 - - var start int - tokens := make([]string, 0, v2TokenCounts) - for i := 0; i < len(subject); i++ { - if subject[i] == '.' { - tokens = append(tokens, subject[start:i]) - start = i + 1 - } - } - tokens = append(tokens, subject[start:]) - // - // Newer server will include the domain name and account hash in the subject, - // and a token at the end. - // - // Old subject was: - // $JS.ACK....... - // - // New subject would be: - // $JS.ACK.......... 
- // - // v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since - // it may be removed in the future. Also, the library has no use for it. - // The point is that a v2 ACK subject is valid if it has at least 11 tokens. - // - tokensLen := len(tokens) - // If lower than 9 or more than 9 but less than 11, report an error - if tokensLen < v1TokenCounts || (tokensLen > v1TokenCounts && tokensLen < v2TokenCounts-1) { - return nil, ErrInvalidSubjectFormat - } - if tokens[0] != "$JS" || tokens[1] != "ACK" { - return nil, fmt.Errorf("%w: subject should start with $JS.ACK", ErrInvalidSubjectFormat) - } - // For v1 style, we insert 2 empty tokens (domain and hash) so that the - // rest of the library references known fields at a constant location. - if tokensLen == v1TokenCounts { - // Extend the array (we know the backend is big enough) - tokens = append(tokens[:AckDomainTokenPos+2], tokens[AckDomainTokenPos:]...) - // Clear the domain and hash tokens - tokens[AckDomainTokenPos], tokens[AckAccHashTokenPos] = "", "" - - } else if tokens[AckDomainTokenPos] == "_" { - // If domain is "_", replace with empty value. - tokens[AckDomainTokenPos] = "" - } - return tokens, nil -} diff --git a/vendor/github.com/nats-io/nats.go/js.go b/vendor/github.com/nats-io/nats.go/js.go deleted file mode 100644 index 7fdb0131..00000000 --- a/vendor/github.com/nats-io/nats.go/js.go +++ /dev/null @@ -1,3812 +0,0 @@ -// Copyright 2020-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/json" - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/nats-io/nats.go/internal/parser" - "github.com/nats-io/nuid" -) - -// JetStream allows persistent messaging through JetStream. -type JetStream interface { - // Publish publishes a message to JetStream. - Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) - - // PublishMsg publishes a Msg to JetStream. - PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) - - // PublishAsync publishes a message to JetStream and returns a PubAckFuture. - // The data should not be changed until the PubAckFuture has been processed. - PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) - - // PublishMsgAsync publishes a Msg to JetStream and returns a PubAckFuture. - // The message should not be changed until the PubAckFuture has been processed. - PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) - - // PublishAsyncPending returns the number of async publishes outstanding for this context. - PublishAsyncPending() int - - // PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd. - PublishAsyncComplete() <-chan struct{} - - // Subscribe creates an async Subscription for JetStream. - // The stream and consumer names can be provided with the nats.Bind() option. - // For creating an ephemeral (where the consumer name is picked by the server), - // you can provide the stream name with nats.BindStream(). - // If no stream name is specified, the library will attempt to figure out which - // stream the subscription is for. See important notes below for more details. 
- // - // IMPORTANT NOTES: - // * If none of the options Bind() nor Durable() are specified, the library will - // send a request to the server to create an ephemeral JetStream consumer, - // which will be deleted after an Unsubscribe() or Drain(), or automatically - // by the server after a short period of time after the NATS subscription is - // gone. - // * If Durable() option is specified, the library will attempt to lookup a JetStream - // consumer with this name, and if found, will bind to it and not attempt to - // delete it. However, if not found, the library will send a request to - // create such durable JetStream consumer. Note that the library will delete - // the JetStream consumer after an Unsubscribe() or Drain() only if it - // created the durable consumer while subscribing. If the durable consumer - // already existed prior to subscribing it won't be deleted. - // * If Bind() option is provided, the library will attempt to lookup the - // consumer with the given name, and if successful, bind to it. If the lookup fails, - // then the Subscribe() call will return an error. - Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) - - // SubscribeSync creates a Subscription that can be used to process messages synchronously. - // See important note in Subscribe() - SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) - - // ChanSubscribe creates channel based Subscription. - // See important note in Subscribe() - ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) - - // ChanQueueSubscribe creates channel based Subscription with a queue group. - // See important note in QueueSubscribe() - ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) - - // QueueSubscribe creates a Subscription with a queue group. - // If no optional durable name nor binding options are specified, the queue name will be used as a durable name. 
- // See important note in Subscribe() - QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) - - // QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. - // See important note in QueueSubscribe() - QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) - - // PullSubscribe creates a Subscription that can fetch messages. - // See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be - // set to an empty string. - PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) -} - -// JetStreamContext allows JetStream messaging and stream management. -type JetStreamContext interface { - JetStream - JetStreamManager - KeyValueManager - ObjectStoreManager -} - -// Request API subjects for JetStream. -const ( - // defaultAPIPrefix is the default prefix for the JetStream API. - defaultAPIPrefix = "$JS.API." - - // jsDomainT is used to create JetStream API prefix by specifying only Domain - jsDomainT = "$JS.%s.API." - - // jsExtDomainT is used to create a StreamSource External APIPrefix - jsExtDomainT = "$JS.%s.API" - - // apiAccountInfo is for obtaining general information about JetStream. - apiAccountInfo = "INFO" - - // apiConsumerCreateT is used to create consumers. - // it accepts stream name and consumer name. - apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" - - // apiConsumerCreateT is used to create consumers. - // it accepts stream name, consumer name and filter subject - apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" - - // apiLegacyConsumerCreateT is used to create consumers. - // this is a legacy endpoint to support creating ephemerals before nats-server v2.9.0. - apiLegacyConsumerCreateT = "CONSUMER.CREATE.%s" - - // apiDurableCreateT is used to create durable consumers. 
- // this is a legacy endpoint to support creating durable consumers before nats-server v2.9.0. - apiDurableCreateT = "CONSUMER.DURABLE.CREATE.%s.%s" - - // apiConsumerInfoT is used to create consumers. - apiConsumerInfoT = "CONSUMER.INFO.%s.%s" - - // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. - apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" - - // apiConsumerDeleteT is used to delete consumers. - apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" - - // apiConsumerListT is used to return all detailed consumer information - apiConsumerListT = "CONSUMER.LIST.%s" - - // apiConsumerNamesT is used to return a list with all consumer names for the stream. - apiConsumerNamesT = "CONSUMER.NAMES.%s" - - // apiStreams can lookup a stream by subject. - apiStreams = "STREAM.NAMES" - - // apiStreamCreateT is the endpoint to create new streams. - apiStreamCreateT = "STREAM.CREATE.%s" - - // apiStreamInfoT is the endpoint to get information on a stream. - apiStreamInfoT = "STREAM.INFO.%s" - - // apiStreamUpdateT is the endpoint to update existing streams. - apiStreamUpdateT = "STREAM.UPDATE.%s" - - // apiStreamDeleteT is the endpoint to delete streams. - apiStreamDeleteT = "STREAM.DELETE.%s" - - // apiStreamPurgeT is the endpoint to purge streams. - apiStreamPurgeT = "STREAM.PURGE.%s" - - // apiStreamListT is the endpoint that will return all detailed stream information - apiStreamListT = "STREAM.LIST" - - // apiMsgGetT is the endpoint to get a message. - apiMsgGetT = "STREAM.MSG.GET.%s" - - // apiMsgGetT is the endpoint to perform a direct get of a message. - apiDirectMsgGetT = "DIRECT.GET.%s" - - // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. - apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" - - // apiMsgDeleteT is the endpoint to remove a message. - apiMsgDeleteT = "STREAM.MSG.DELETE.%s" - - // orderedHeartbeatsInterval is how fast we want HBs from the server during idle. 
- orderedHeartbeatsInterval = 5 * time.Second - - // Scale for threshold of missed HBs or lack of activity. - hbcThresh = 2 - - // For ChanSubscription, we can't update sub.delivered as we do for other - // type of subscriptions, since the channel is user provided. - // With flow control in play, we will check for flow control on incoming - // messages (as opposed to when they are delivered), but also from a go - // routine. Without this, the subscription would possibly stall until - // a new message or heartbeat/fc are received. - chanSubFCCheckInterval = 250 * time.Millisecond - - // Default time wait between retries on Publish iff err is NoResponders. - DefaultPubRetryWait = 250 * time.Millisecond - - // Default number of retries - DefaultPubRetryAttempts = 2 - - // defaultAsyncPubAckInflight is the number of async pub acks inflight. - defaultAsyncPubAckInflight = 4000 -) - -// Types of control messages, so far heartbeat and flow control -const ( - jsCtrlHB = 1 - jsCtrlFC = 2 -) - -// js is an internal struct from a JetStreamContext. -type js struct { - nc *Conn - opts *jsOpts - - // For async publish context. - mu sync.RWMutex - rpre string - rsub *Subscription - pafs map[string]*pubAckFuture - stc chan struct{} - dch chan struct{} - rr *rand.Rand - connStatusCh chan (Status) -} - -type jsOpts struct { - ctx context.Context - // For importing JetStream from other accounts. - pre string - // Amount of time to wait for API requests. - wait time.Duration - // For async publish error handling. 
- aecb MsgErrHandler - // Max async pub ack in flight - maxpa int - // the domain that produced the pre - domain string - // enables protocol tracing - ctrace ClientTrace - shouldTrace bool - // purgeOpts contains optional stream purge options - purgeOpts *StreamPurgeRequest - // streamInfoOpts contains optional stream info options - streamInfoOpts *StreamInfoRequest - // streamListSubject is used for subject filtering when listing streams / stream names - streamListSubject string - // For direct get message requests - directGet bool - // For direct get next message - directNextFor string - - // featureFlags are used to enable/disable specific JetStream features - featureFlags featureFlags -} - -const ( - defaultRequestWait = 5 * time.Second - defaultAccountCheck = 20 * time.Second -) - -// JetStream returns a JetStreamContext for messaging and stream management. -// Errors are only returned if inconsistent options are provided. -func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) { - js := &js{ - nc: nc, - opts: &jsOpts{ - pre: defaultAPIPrefix, - wait: defaultRequestWait, - maxpa: defaultAsyncPubAckInflight, - }, - } - - for _, opt := range opts { - if err := opt.configureJSContext(js.opts); err != nil { - return nil, err - } - } - return js, nil -} - -// JSOpt configures a JetStreamContext. -type JSOpt interface { - configureJSContext(opts *jsOpts) error -} - -// jsOptFn configures an option for the JetStreamContext. -type jsOptFn func(opts *jsOpts) error - -func (opt jsOptFn) configureJSContext(opts *jsOpts) error { - return opt(opts) -} - -type featureFlags struct { - useDurableConsumerCreate bool -} - -// UseLegacyDurableConsumers makes JetStream use the legacy (pre nats-server v2.9.0) subjects for consumer creation. -// If this option is used when creating JetStremContext, $JS.API.CONSUMER.DURABLE.CREATE.. will be used -// to create a consumer with Durable provided, rather than $JS.API.CONSUMER.CREATE... 
-func UseLegacyDurableConsumers() JSOpt { - return jsOptFn(func(opts *jsOpts) error { - opts.featureFlags.useDurableConsumerCreate = true - return nil - }) -} - -// ClientTrace can be used to trace API interactions for the JetStream Context. -type ClientTrace struct { - RequestSent func(subj string, payload []byte) - ResponseReceived func(subj string, payload []byte, hdr Header) -} - -func (ct ClientTrace) configureJSContext(js *jsOpts) error { - js.ctrace = ct - js.shouldTrace = true - return nil -} - -// Domain changes the domain part of JetStream API prefix. -func Domain(domain string) JSOpt { - if domain == _EMPTY_ { - return APIPrefix(_EMPTY_) - } - - return jsOptFn(func(js *jsOpts) error { - js.domain = domain - js.pre = fmt.Sprintf(jsDomainT, domain) - - return nil - }) - -} - -func (s *StreamPurgeRequest) configureJSContext(js *jsOpts) error { - js.purgeOpts = s - return nil -} - -func (s *StreamInfoRequest) configureJSContext(js *jsOpts) error { - js.streamInfoOpts = s - return nil -} - -// APIPrefix changes the default prefix used for the JetStream API. -func APIPrefix(pre string) JSOpt { - return jsOptFn(func(js *jsOpts) error { - if pre == _EMPTY_ { - return nil - } - - js.pre = pre - if !strings.HasSuffix(js.pre, ".") { - js.pre = js.pre + "." - } - - return nil - }) -} - -// DirectGet is an option that can be used to make GetMsg() or GetLastMsg() -// retrieve message directly from a group of servers (leader and replicas) -// if the stream was created with the AllowDirect option. -func DirectGet() JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.directGet = true - return nil - }) -} - -// DirectGetNext is an option that can be used to make GetMsg() retrieve message -// directly from a group of servers (leader and replicas) if the stream was -// created with the AllowDirect option. -// The server will find the next message matching the filter `subject` starting -// at the start sequence (argument in GetMsg()). 
The filter `subject` can be a -// wildcard. -func DirectGetNext(subject string) JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.directGet = true - js.directNextFor = subject - return nil - }) -} - -// StreamListFilter is an option that can be used to configure `StreamsInfo()` and `StreamNames()` requests. -// It allows filtering the returned streams by subject associated with each stream. -// Wildcards can be used. For example, `StreamListFilter(FOO.*.A) will return -// all streams which have at least one subject matching the provided pattern (e.g. FOO.TEST.A). -func StreamListFilter(subject string) JSOpt { - return jsOptFn(func(opts *jsOpts) error { - opts.streamListSubject = subject - return nil - }) -} - -func (js *js) apiSubj(subj string) string { - if js.opts.pre == _EMPTY_ { - return subj - } - var b strings.Builder - b.WriteString(js.opts.pre) - b.WriteString(subj) - return b.String() -} - -// PubOpt configures options for publishing JetStream messages. -type PubOpt interface { - configurePublish(opts *pubOpts) error -} - -// pubOptFn is a function option used to configure JetStream Publish. -type pubOptFn func(opts *pubOpts) error - -func (opt pubOptFn) configurePublish(opts *pubOpts) error { - return opt(opts) -} - -type pubOpts struct { - ctx context.Context - ttl time.Duration - id string - lid string // Expected last msgId - str string // Expected stream name - seq *uint64 // Expected last sequence - lss *uint64 // Expected last sequence per subject - - // Publish retries for NoResponders err. - rwait time.Duration // Retry wait between attempts - rnum int // Retry attempts - - // stallWait is the max wait of a async pub ack. - stallWait time.Duration -} - -// pubAckResponse is the ack response from the JetStream API when publishing a message. -type pubAckResponse struct { - apiResponse - *PubAck -} - -// PubAck is an ack received after successfully publishing a message. 
-type PubAck struct { - Stream string `json:"stream"` - Sequence uint64 `json:"seq"` - Duplicate bool `json:"duplicate,omitempty"` - Domain string `json:"domain,omitempty"` -} - -// Headers for published messages. -const ( - MsgIdHdr = "Nats-Msg-Id" - ExpectedStreamHdr = "Nats-Expected-Stream" - ExpectedLastSeqHdr = "Nats-Expected-Last-Sequence" - ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence" - ExpectedLastMsgIdHdr = "Nats-Expected-Last-Msg-Id" - MsgRollup = "Nats-Rollup" -) - -// Headers for republished messages and direct gets. -const ( - JSStream = "Nats-Stream" - JSSequence = "Nats-Sequence" - JSTimeStamp = "Nats-Time-Stamp" - JSSubject = "Nats-Subject" - JSLastSequence = "Nats-Last-Sequence" -) - -// MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested. -const MsgSize = "Nats-Msg-Size" - -// Rollups, can be subject only or all messages. -const ( - MsgRollupSubject = "sub" - MsgRollupAll = "all" -) - -// PublishMsg publishes a Msg to a stream from JetStream. -func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) { - var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts} - if len(opts) > 0 { - if m.Header == nil { - m.Header = Header{} - } - for _, opt := range opts { - if err := opt.configurePublish(&o); err != nil { - return nil, err - } - } - } - // Check for option collisions. Right now just timeout and context. 
- if o.ctx != nil && o.ttl != 0 { - return nil, ErrContextAndTimeout - } - if o.ttl == 0 && o.ctx == nil { - o.ttl = js.opts.wait - } - if o.stallWait > 0 { - return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish") - } - - if o.id != _EMPTY_ { - m.Header.Set(MsgIdHdr, o.id) - } - if o.lid != _EMPTY_ { - m.Header.Set(ExpectedLastMsgIdHdr, o.lid) - } - if o.str != _EMPTY_ { - m.Header.Set(ExpectedStreamHdr, o.str) - } - if o.seq != nil { - m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) - } - if o.lss != nil { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) - } - - var resp *Msg - var err error - - if o.ttl > 0 { - resp, err = js.nc.RequestMsg(m, time.Duration(o.ttl)) - } else { - resp, err = js.nc.RequestMsgWithContext(o.ctx, m) - } - - if err != nil { - for r, ttl := 0, o.ttl; err == ErrNoResponders && (r < o.rnum || o.rnum < 0); r++ { - // To protect against small blips in leadership changes etc, if we get a no responders here retry. - if o.ctx != nil { - select { - case <-o.ctx.Done(): - case <-time.After(o.rwait): - } - } else { - time.Sleep(o.rwait) - } - if o.ttl > 0 { - ttl -= o.rwait - if ttl <= 0 { - err = ErrTimeout - break - } - resp, err = js.nc.RequestMsg(m, time.Duration(ttl)) - } else { - resp, err = js.nc.RequestMsgWithContext(o.ctx, m) - } - } - if err != nil { - if err == ErrNoResponders { - err = ErrNoStreamResponse - } - return nil, err - } - } - - var pa pubAckResponse - if err := json.Unmarshal(resp.Data, &pa); err != nil { - return nil, ErrInvalidJSAck - } - if pa.Error != nil { - return nil, pa.Error - } - if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ { - return nil, ErrInvalidJSAck - } - return pa.PubAck, nil -} - -// Publish publishes a message to a stream from JetStream. -func (js *js) Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) { - return js.PublishMsg(&Msg{Subject: subj, Data: data}, opts...) -} - -// PubAckFuture is a future for a PubAck. 
-type PubAckFuture interface { - // Ok returns a receive only channel that can be used to get a PubAck. - Ok() <-chan *PubAck - - // Err returns a receive only channel that can be used to get the error from an async publish. - Err() <-chan error - - // Msg returns the message that was sent to the server. - Msg() *Msg -} - -type pubAckFuture struct { - js *js - msg *Msg - pa *PubAck - st time.Time - err error - errCh chan error - doneCh chan *PubAck -} - -func (paf *pubAckFuture) Ok() <-chan *PubAck { - paf.js.mu.Lock() - defer paf.js.mu.Unlock() - - if paf.doneCh == nil { - paf.doneCh = make(chan *PubAck, 1) - if paf.pa != nil { - paf.doneCh <- paf.pa - } - } - - return paf.doneCh -} - -func (paf *pubAckFuture) Err() <-chan error { - paf.js.mu.Lock() - defer paf.js.mu.Unlock() - - if paf.errCh == nil { - paf.errCh = make(chan error, 1) - if paf.err != nil { - paf.errCh <- paf.err - } - } - - return paf.errCh -} - -func (paf *pubAckFuture) Msg() *Msg { - paf.js.mu.RLock() - defer paf.js.mu.RUnlock() - return paf.msg -} - -// For quick token lookup etc. -const aReplyPreLen = 14 -const aReplyTokensize = 6 - -func (js *js) newAsyncReply() string { - js.mu.Lock() - if js.rsub == nil { - // Create our wildcard reply subject. - sha := sha256.New() - sha.Write([]byte(nuid.Next())) - b := sha.Sum(nil) - for i := 0; i < aReplyTokensize; i++ { - b[i] = rdigits[int(b[i]%base)] - } - inboxPrefix := InboxPrefix - if js.nc.Opts.InboxPrefix != _EMPTY_ { - inboxPrefix = js.nc.Opts.InboxPrefix + "." 
- } - js.rpre = fmt.Sprintf("%s%s.", inboxPrefix, b[:aReplyTokensize]) - sub, err := js.nc.Subscribe(fmt.Sprintf("%s*", js.rpre), js.handleAsyncReply) - if err != nil { - js.mu.Unlock() - return _EMPTY_ - } - js.rsub = sub - js.rr = rand.New(rand.NewSource(time.Now().UnixNano())) - } - if js.connStatusCh == nil { - js.connStatusCh = js.nc.StatusChanged(RECONNECTING, CLOSED) - go js.resetPendingAcksOnReconnect() - } - var sb strings.Builder - sb.WriteString(js.rpre) - rn := js.rr.Int63() - var b [aReplyTokensize]byte - for i, l := 0, rn; i < len(b); i++ { - b[i] = rdigits[l%base] - l /= base - } - sb.Write(b[:]) - js.mu.Unlock() - return sb.String() -} - -func (js *js) resetPendingAcksOnReconnect() { - js.mu.Lock() - connStatusCh := js.connStatusCh - js.mu.Unlock() - for { - newStatus, ok := <-connStatusCh - if !ok || newStatus == CLOSED { - return - } - js.mu.Lock() - for _, paf := range js.pafs { - paf.err = ErrDisconnected - } - js.pafs = nil - if js.dch != nil { - close(js.dch) - js.dch = nil - } - js.mu.Unlock() - } -} - -func (js *js) cleanupReplySub() { - js.mu.Lock() - if js.rsub != nil { - js.rsub.Unsubscribe() - js.rsub = nil - } - if js.connStatusCh != nil { - close(js.connStatusCh) - js.connStatusCh = nil - } - js.mu.Unlock() -} - -// registerPAF will register for a PubAckFuture. -func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) { - js.mu.Lock() - if js.pafs == nil { - js.pafs = make(map[string]*pubAckFuture) - } - paf.js = js - js.pafs[id] = paf - np := len(js.pafs) - maxpa := js.opts.maxpa - js.mu.Unlock() - return np, maxpa -} - -// Lock should be held. -func (js *js) getPAF(id string) *pubAckFuture { - if js.pafs == nil { - return nil - } - return js.pafs[id] -} - -// clearPAF will remove a PubAckFuture that was registered. -func (js *js) clearPAF(id string) { - js.mu.Lock() - delete(js.pafs, id) - js.mu.Unlock() -} - -// PublishAsyncPending returns how many PubAckFutures are pending. 
-func (js *js) PublishAsyncPending() int { - js.mu.RLock() - defer js.mu.RUnlock() - return len(js.pafs) -} - -func (js *js) asyncStall() <-chan struct{} { - js.mu.Lock() - if js.stc == nil { - js.stc = make(chan struct{}) - } - stc := js.stc - js.mu.Unlock() - return stc -} - -// Handle an async reply from PublishAsync. -func (js *js) handleAsyncReply(m *Msg) { - if len(m.Subject) <= aReplyPreLen { - return - } - id := m.Subject[aReplyPreLen:] - - js.mu.Lock() - paf := js.getPAF(id) - if paf == nil { - js.mu.Unlock() - return - } - // Remove - delete(js.pafs, id) - - // Check on anyone stalled and waiting. - if js.stc != nil && len(js.pafs) < js.opts.maxpa { - close(js.stc) - js.stc = nil - } - // Check on anyone one waiting on done status. - if js.dch != nil && len(js.pafs) == 0 { - dch := js.dch - js.dch = nil - // Defer here so error is processed and can be checked. - defer close(dch) - } - - doErr := func(err error) { - paf.err = err - if paf.errCh != nil { - paf.errCh <- paf.err - } - cb := js.opts.aecb - js.mu.Unlock() - if cb != nil { - cb(paf.js, paf.msg, err) - } - } - - // Process no responders etc. - if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - doErr(ErrNoResponders) - return - } - - var pa pubAckResponse - if err := json.Unmarshal(m.Data, &pa); err != nil { - doErr(ErrInvalidJSAck) - return - } - if pa.Error != nil { - doErr(pa.Error) - return - } - if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ { - doErr(ErrInvalidJSAck) - return - } - - // So here we have received a proper puback. - paf.pa = pa.PubAck - if paf.doneCh != nil { - paf.doneCh <- paf.pa - } - js.mu.Unlock() -} - -// MsgErrHandler is used to process asynchronous errors from -// JetStream PublishAsync. It will return the original -// message sent to the server for possible retransmitting and the error encountered. -type MsgErrHandler func(JetStream, *Msg, error) - -// PublishAsyncErrHandler sets the error handler for async publishes in JetStream. 
-func PublishAsyncErrHandler(cb MsgErrHandler) JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.aecb = cb - return nil - }) -} - -// PublishAsyncMaxPending sets the maximum outstanding async publishes that can be inflight at one time. -func PublishAsyncMaxPending(max int) JSOpt { - return jsOptFn(func(js *jsOpts) error { - if max < 1 { - return errors.New("nats: max ack pending should be >= 1") - } - js.maxpa = max - return nil - }) -} - -// PublishAsync publishes a message to JetStream and returns a PubAckFuture -func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) { - return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...) -} - -const defaultStallWait = 200 * time.Millisecond - -func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { - var o pubOpts - if len(opts) > 0 { - if m.Header == nil { - m.Header = Header{} - } - for _, opt := range opts { - if err := opt.configurePublish(&o); err != nil { - return nil, err - } - } - } - - // Timeouts and contexts do not make sense for these. - if o.ttl != 0 || o.ctx != nil { - return nil, ErrContextAndTimeout - } - stallWait := defaultStallWait - if o.stallWait > 0 { - stallWait = o.stallWait - } - - // FIXME(dlc) - Make common. 
- if o.id != _EMPTY_ { - m.Header.Set(MsgIdHdr, o.id) - } - if o.lid != _EMPTY_ { - m.Header.Set(ExpectedLastMsgIdHdr, o.lid) - } - if o.str != _EMPTY_ { - m.Header.Set(ExpectedStreamHdr, o.str) - } - if o.seq != nil { - m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) - } - if o.lss != nil { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) - } - - // Reply - if m.Reply != _EMPTY_ { - return nil, errors.New("nats: reply subject should be empty") - } - reply := m.Reply - m.Reply = js.newAsyncReply() - defer func() { m.Reply = reply }() - - if m.Reply == _EMPTY_ { - return nil, errors.New("nats: error creating async reply handler") - } - - id := m.Reply[aReplyPreLen:] - paf := &pubAckFuture{msg: m, st: time.Now()} - numPending, maxPending := js.registerPAF(id, paf) - - if maxPending > 0 && numPending >= maxPending { - select { - case <-js.asyncStall(): - case <-time.After(stallWait): - js.clearPAF(id) - return nil, errors.New("nats: stalled with too many outstanding async published messages") - } - } - if err := js.nc.PublishMsg(m); err != nil { - js.clearPAF(id) - return nil, err - } - - return paf, nil -} - -// PublishAsyncComplete returns a channel that will be closed when all outstanding messages have been ack'd. -func (js *js) PublishAsyncComplete() <-chan struct{} { - js.mu.Lock() - defer js.mu.Unlock() - if js.dch == nil { - js.dch = make(chan struct{}) - } - dch := js.dch - if len(js.pafs) == 0 { - close(js.dch) - js.dch = nil - } - return dch -} - -// MsgId sets the message ID used for deduplication. -func MsgId(id string) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.id = id - return nil - }) -} - -// ExpectStream sets the expected stream to respond from the publish. -func ExpectStream(stream string) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.str = stream - return nil - }) -} - -// ExpectLastSequence sets the expected sequence in the response from the publish. 
-func ExpectLastSequence(seq uint64) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.seq = &seq - return nil - }) -} - -// ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish. -func ExpectLastSequencePerSubject(seq uint64) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.lss = &seq - return nil - }) -} - -// ExpectLastMsgId sets the expected last msgId in the response from the publish. -func ExpectLastMsgId(id string) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.lid = id - return nil - }) -} - -// RetryWait sets the retry wait time when ErrNoResponders is encountered. -func RetryWait(dur time.Duration) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.rwait = dur - return nil - }) -} - -// RetryAttempts sets the retry number of attempts when ErrNoResponders is encountered. -func RetryAttempts(num int) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.rnum = num - return nil - }) -} - -// StallWait sets the max wait when the producer becomes stall producing messages. -func StallWait(ttl time.Duration) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - if ttl <= 0 { - return fmt.Errorf("nats: stall wait should be more than 0") - } - opts.stallWait = ttl - return nil - }) -} - -type ackOpts struct { - ttl time.Duration - ctx context.Context - nakDelay time.Duration -} - -// AckOpt are the options that can be passed when acknowledge a message. -type AckOpt interface { - configureAck(opts *ackOpts) error -} - -// MaxWait sets the maximum amount of time we will wait for a response. -type MaxWait time.Duration - -func (ttl MaxWait) configureJSContext(js *jsOpts) error { - js.wait = time.Duration(ttl) - return nil -} - -func (ttl MaxWait) configurePull(opts *pullOpts) error { - opts.ttl = time.Duration(ttl) - return nil -} - -// AckWait sets the maximum amount of time we will wait for an ack. 
-type AckWait time.Duration - -func (ttl AckWait) configurePublish(opts *pubOpts) error { - opts.ttl = time.Duration(ttl) - return nil -} - -func (ttl AckWait) configureSubscribe(opts *subOpts) error { - opts.cfg.AckWait = time.Duration(ttl) - return nil -} - -func (ttl AckWait) configureAck(opts *ackOpts) error { - opts.ttl = time.Duration(ttl) - return nil -} - -// ContextOpt is an option used to set a context.Context. -type ContextOpt struct { - context.Context -} - -func (ctx ContextOpt) configureJSContext(opts *jsOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configurePublish(opts *pubOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configureSubscribe(opts *subOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configurePull(opts *pullOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configureAck(opts *ackOpts) error { - opts.ctx = ctx - return nil -} - -// Context returns an option that can be used to configure a context for APIs -// that are context aware such as those part of the JetStream interface. -func Context(ctx context.Context) ContextOpt { - return ContextOpt{ctx} -} - -type nakDelay time.Duration - -func (d nakDelay) configureAck(opts *ackOpts) error { - opts.nakDelay = time.Duration(d) - return nil -} - -// Subscribe - -// ConsumerConfig is the configuration of a JetStream consumer. 
-type ConsumerConfig struct { - Durable string `json:"durable_name,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - DeliverPolicy DeliverPolicy `json:"deliver_policy"` - OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - AckPolicy AckPolicy `json:"ack_policy"` - AckWait time.Duration `json:"ack_wait,omitempty"` - MaxDeliver int `json:"max_deliver,omitempty"` - BackOff []time.Duration `json:"backoff,omitempty"` - FilterSubject string `json:"filter_subject,omitempty"` - FilterSubjects []string `json:"filter_subjects,omitempty"` - ReplayPolicy ReplayPolicy `json:"replay_policy"` - RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec - SampleFrequency string `json:"sample_freq,omitempty"` - MaxWaiting int `json:"max_waiting,omitempty"` - MaxAckPending int `json:"max_ack_pending,omitempty"` - FlowControl bool `json:"flow_control,omitempty"` - Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` - HeadersOnly bool `json:"headers_only,omitempty"` - - // Pull based options. - MaxRequestBatch int `json:"max_batch,omitempty"` - MaxRequestExpires time.Duration `json:"max_expires,omitempty"` - MaxRequestMaxBytes int `json:"max_bytes,omitempty"` - - // Push based consumers. - DeliverSubject string `json:"deliver_subject,omitempty"` - DeliverGroup string `json:"deliver_group,omitempty"` - - // Inactivity threshold. - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - - // Generally inherited by parent stream and other markers, now can be configured directly. - Replicas int `json:"num_replicas"` - // Force memory storage. - MemoryStorage bool `json:"mem_storage,omitempty"` - - // Metadata is additional metadata for the Consumer. - // Keys starting with `_nats` are reserved. 
- // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ConsumerInfo is the info from a JetStream consumer. -type ConsumerInfo struct { - Stream string `json:"stream_name"` - Name string `json:"name"` - Created time.Time `json:"created"` - Config ConsumerConfig `json:"config"` - Delivered SequenceInfo `json:"delivered"` - AckFloor SequenceInfo `json:"ack_floor"` - NumAckPending int `json:"num_ack_pending"` - NumRedelivered int `json:"num_redelivered"` - NumWaiting int `json:"num_waiting"` - NumPending uint64 `json:"num_pending"` - Cluster *ClusterInfo `json:"cluster,omitempty"` - PushBound bool `json:"push_bound,omitempty"` -} - -// SequenceInfo has both the consumer and the stream sequence and last activity. -type SequenceInfo struct { - Consumer uint64 `json:"consumer_seq"` - Stream uint64 `json:"stream_seq"` - Last *time.Time `json:"last_active,omitempty"` -} - -// SequencePair includes the consumer and stream sequence info from a JetStream consumer. -type SequencePair struct { - Consumer uint64 `json:"consumer_seq"` - Stream uint64 `json:"stream_seq"` -} - -// nextRequest is for getting next messages for pull based consumers from JetStream. -type nextRequest struct { - Expires time.Duration `json:"expires,omitempty"` - Batch int `json:"batch,omitempty"` - NoWait bool `json:"no_wait,omitempty"` - MaxBytes int `json:"max_bytes,omitempty"` - Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` -} - -// jsSub includes JetStream subscription info. -type jsSub struct { - js *js - - // For pull subscribers, this is the next message subject to send requests to. - nms string - - psubj string // the subject that was passed by user to the subscribe calls - consumer string - stream string - deliver string - pull bool - dc bool // Delete JS consumer - ackNone bool - - // This is ConsumerInfo's Pending+Consumer.Delivered that we get from the - // add consumer response. 
Note that some versions of the server gather the - // consumer info *after* the creation of the consumer, which means that - // some messages may have been already delivered. So the sum of the two - // is a more accurate representation of the number of messages pending or - // in the process of being delivered to the subscription when created. - pending uint64 - - // Ordered consumers - ordered bool - dseq uint64 - sseq uint64 - ccreq *createConsumerRequest - - // Heartbeats and Flow Control handling from push consumers. - hbc *time.Timer - hbi time.Duration - active bool - cmeta string - fcr string - fcd uint64 - fciseq uint64 - csfct *time.Timer - - // Cancellation function to cancel context on drain/unsubscribe. - cancel func() -} - -// Deletes the JS Consumer. -// No connection nor subscription lock must be held on entry. -func (sub *Subscription) deleteConsumer() error { - sub.mu.Lock() - jsi := sub.jsi - if jsi == nil { - sub.mu.Unlock() - return nil - } - stream, consumer := jsi.stream, jsi.consumer - js := jsi.js - sub.mu.Unlock() - - return js.DeleteConsumer(stream, consumer) -} - -// SubOpt configures options for subscribing to JetStream consumers. -type SubOpt interface { - configureSubscribe(opts *subOpts) error -} - -// subOptFn is a function option used to configure a JetStream Subscribe. -type subOptFn func(opts *subOpts) error - -func (opt subOptFn) configureSubscribe(opts *subOpts) error { - return opt(opts) -} - -// Subscribe creates an async Subscription for JetStream. -// The stream and consumer names can be provided with the nats.Bind() option. -// For creating an ephemeral (where the consumer name is picked by the server), -// you can provide the stream name with nats.BindStream(). -// If no stream name is specified, the library will attempt to figure out which -// stream the subscription is for. See important notes below for more details. 
-// -// IMPORTANT NOTES: -// * If none of the options Bind() nor Durable() are specified, the library will -// send a request to the server to create an ephemeral JetStream consumer, -// which will be deleted after an Unsubscribe() or Drain(), or automatically -// by the server after a short period of time after the NATS subscription is -// gone. -// * If Durable() option is specified, the library will attempt to lookup a JetStream -// consumer with this name, and if found, will bind to it and not attempt to -// delete it. However, if not found, the library will send a request to create -// such durable JetStream consumer. The library will delete the JetStream consumer -// after an Unsubscribe() or Drain(). -// * If Bind() option is provided, the library will attempt to lookup the -// consumer with the given name, and if successful, bind to it. If the lookup fails, -// then the Subscribe() call will return an error. -func (js *js) Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { - if cb == nil { - return nil, ErrBadSubscription - } - return js.subscribe(subj, _EMPTY_, cb, nil, false, false, opts) -} - -// SubscribeSync creates a Subscription that can be used to process messages synchronously. -// See important note in Subscribe() -func (js *js) SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) { - mch := make(chan *Msg, js.nc.Opts.SubChanLen) - return js.subscribe(subj, _EMPTY_, nil, mch, true, false, opts) -} - -// QueueSubscribe creates a Subscription with a queue group. -// If no optional durable name nor binding options are specified, the queue name will be used as a durable name. 
-// See important note in Subscribe() -func (js *js) QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { - if cb == nil { - return nil, ErrBadSubscription - } - return js.subscribe(subj, queue, cb, nil, false, false, opts) -} - -// QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. -// See important note in QueueSubscribe() -func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) { - mch := make(chan *Msg, js.nc.Opts.SubChanLen) - return js.subscribe(subj, queue, nil, mch, true, false, opts) -} - -// ChanSubscribe creates channel based Subscription. -// Using ChanSubscribe without buffered capacity is not recommended since -// it will be prone to dropping messages with a slow consumer error. Make sure to give the channel enough -// capacity to handle bursts in traffic, for example other Subscribe APIs use a default of 512k capacity in comparison. -// See important note in Subscribe() -func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { - return js.subscribe(subj, _EMPTY_, nil, ch, false, false, opts) -} - -// ChanQueueSubscribe creates channel based Subscription with a queue group. -// See important note in QueueSubscribe() -func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { - return js.subscribe(subj, queue, nil, ch, false, false, opts) -} - -// PullSubscribe creates a Subscription that can fetch messages. 
-// See important note in Subscribe() -func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) { - mch := make(chan *Msg, js.nc.Opts.SubChanLen) - if durable != "" { - opts = append(opts, Durable(durable)) - } - return js.subscribe(subj, _EMPTY_, nil, mch, true, true, opts) -} - -func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode bool, subj, queue string) (string, error) { - ccfg := &info.Config - - // Make sure this new subject matches or is a subset. - if ccfg.FilterSubject != _EMPTY_ && subj != ccfg.FilterSubject { - return _EMPTY_, ErrSubjectMismatch - } - - // Prevent binding a subscription against incompatible consumer types. - if isPullMode && ccfg.DeliverSubject != _EMPTY_ { - return _EMPTY_, ErrPullSubscribeToPushConsumer - } else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ { - return _EMPTY_, ErrPullSubscribeRequired - } - - // If pull mode, nothing else to check here. - if isPullMode { - return _EMPTY_, checkConfig(ccfg, userCfg) - } - - // At this point, we know the user wants push mode, and the JS consumer is - // really push mode. - - dg := info.Config.DeliverGroup - if dg == _EMPTY_ { - // Prevent an user from attempting to create a queue subscription on - // a JS consumer that was not created with a deliver group. - if queue != _EMPTY_ { - return _EMPTY_, fmt.Errorf("cannot create a queue subscription for a consumer without a deliver group") - } else if info.PushBound { - // Need to reject a non queue subscription to a non queue consumer - // if the consumer is already bound. 
- return _EMPTY_, fmt.Errorf("consumer is already bound to a subscription") - } - } else { - // If the JS consumer has a deliver group, we need to fail a non queue - // subscription attempt: - if queue == _EMPTY_ { - return _EMPTY_, fmt.Errorf("cannot create a subscription for a consumer with a deliver group %q", dg) - } else if queue != dg { - // Here the user's queue group name does not match the one associated - // with the JS consumer. - return _EMPTY_, fmt.Errorf("cannot create a queue subscription %q for a consumer with a deliver group %q", - queue, dg) - } - } - if err := checkConfig(ccfg, userCfg); err != nil { - return _EMPTY_, err - } - return ccfg.DeliverSubject, nil -} - -func checkConfig(s, u *ConsumerConfig) error { - makeErr := func(fieldName string, usrVal, srvVal any) error { - return fmt.Errorf("configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal) - } - - if u.Durable != _EMPTY_ && u.Durable != s.Durable { - return makeErr("durable", u.Durable, s.Durable) - } - if u.Description != _EMPTY_ && u.Description != s.Description { - return makeErr("description", u.Description, s.Description) - } - if u.DeliverPolicy != deliverPolicyNotSet && u.DeliverPolicy != s.DeliverPolicy { - return makeErr("deliver policy", u.DeliverPolicy, s.DeliverPolicy) - } - if u.OptStartSeq > 0 && u.OptStartSeq != s.OptStartSeq { - return makeErr("optional start sequence", u.OptStartSeq, s.OptStartSeq) - } - if u.OptStartTime != nil && !u.OptStartTime.IsZero() && !(*u.OptStartTime).Equal(*s.OptStartTime) { - return makeErr("optional start time", u.OptStartTime, s.OptStartTime) - } - if u.AckPolicy != ackPolicyNotSet && u.AckPolicy != s.AckPolicy { - return makeErr("ack policy", u.AckPolicy, s.AckPolicy) - } - if u.AckWait > 0 && u.AckWait != s.AckWait { - return makeErr("ack wait", u.AckWait, s.AckWait) - } - if u.MaxDeliver > 0 && u.MaxDeliver != s.MaxDeliver { - return makeErr("max deliver", u.MaxDeliver, s.MaxDeliver) - } - if 
u.ReplayPolicy != replayPolicyNotSet && u.ReplayPolicy != s.ReplayPolicy { - return makeErr("replay policy", u.ReplayPolicy, s.ReplayPolicy) - } - if u.RateLimit > 0 && u.RateLimit != s.RateLimit { - return makeErr("rate limit", u.RateLimit, s.RateLimit) - } - if u.SampleFrequency != _EMPTY_ && u.SampleFrequency != s.SampleFrequency { - return makeErr("sample frequency", u.SampleFrequency, s.SampleFrequency) - } - if u.MaxWaiting > 0 && u.MaxWaiting != s.MaxWaiting { - return makeErr("max waiting", u.MaxWaiting, s.MaxWaiting) - } - if u.MaxAckPending > 0 && u.MaxAckPending != s.MaxAckPending { - return makeErr("max ack pending", u.MaxAckPending, s.MaxAckPending) - } - // For flow control, we want to fail if the user explicit wanted it, but - // it is not set in the existing consumer. If it is not asked by the user, - // the library still handles it and so no reason to fail. - if u.FlowControl && !s.FlowControl { - return makeErr("flow control", u.FlowControl, s.FlowControl) - } - if u.Heartbeat > 0 && u.Heartbeat != s.Heartbeat { - return makeErr("heartbeat", u.Heartbeat, s.Heartbeat) - } - if u.Replicas > 0 && u.Replicas != s.Replicas { - return makeErr("replicas", u.Replicas, s.Replicas) - } - if u.MemoryStorage && !s.MemoryStorage { - return makeErr("memory storage", u.MemoryStorage, s.MemoryStorage) - } - return nil -} - -func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, isPullMode bool, opts []SubOpt) (*Subscription, error) { - cfg := ConsumerConfig{ - DeliverPolicy: deliverPolicyNotSet, - AckPolicy: ackPolicyNotSet, - ReplayPolicy: replayPolicyNotSet, - } - o := subOpts{cfg: &cfg} - if len(opts) > 0 { - for _, opt := range opts { - if opt == nil { - continue - } - if err := opt.configureSubscribe(&o); err != nil { - return nil, err - } - } - } - - // If no stream name is specified, the subject cannot be empty. 
- if subj == _EMPTY_ && o.stream == _EMPTY_ { - return nil, fmt.Errorf("nats: subject required") - } - - // Note that these may change based on the consumer info response we may get. - hasHeartbeats := o.cfg.Heartbeat > 0 - hasFC := o.cfg.FlowControl - - // Some checks for pull subscribers - if isPullMode { - // No deliver subject should be provided - if o.cfg.DeliverSubject != _EMPTY_ { - return nil, ErrPullSubscribeToPushConsumer - } - } - - // Some check/setting specific to queue subs - if queue != _EMPTY_ { - // Queue subscriber cannot have HB or FC (since messages will be randomly dispatched - // to members). We may in the future have a separate NATS subscription that all members - // would subscribe to and server would send on. - if o.cfg.Heartbeat > 0 || o.cfg.FlowControl { - // Not making this a public ErrXXX in case we allow in the future. - return nil, fmt.Errorf("nats: queue subscription doesn't support idle heartbeat nor flow control") - } - - // If this is a queue subscription and no consumer nor durable name was specified, - // then we will use the queue name as a durable name. - if o.consumer == _EMPTY_ && o.cfg.Durable == _EMPTY_ { - if err := checkConsumerName(queue); err != nil { - return nil, err - } - o.cfg.Durable = queue - } - } - - var ( - err error - shouldCreate bool - info *ConsumerInfo - deliver string - stream = o.stream - consumer = o.consumer - isDurable = o.cfg.Durable != _EMPTY_ - consumerBound = o.bound - ctx = o.ctx - skipCInfo = o.skipCInfo - notFoundErr bool - lookupErr bool - nc = js.nc - nms string - hbi time.Duration - ccreq *createConsumerRequest // In case we need to hold onto it for ordered consumers. - maxap int - ) - - // Do some quick checks here for ordered consumers. We do these here instead of spread out - // in the individual SubOpts. - if o.ordered { - // Make sure we are not durable. - if isDurable { - return nil, fmt.Errorf("nats: durable can not be set for an ordered consumer") - } - // Check ack policy. 
- if o.cfg.AckPolicy != ackPolicyNotSet { - return nil, fmt.Errorf("nats: ack policy can not be set for an ordered consumer") - } - // Check max deliver. - if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 { - return nil, fmt.Errorf("nats: max deliver can not be set for an ordered consumer") - } - // No deliver subject, we pick our own. - if o.cfg.DeliverSubject != _EMPTY_ { - return nil, fmt.Errorf("nats: deliver subject can not be set for an ordered consumer") - } - // Queue groups not allowed. - if queue != _EMPTY_ { - return nil, fmt.Errorf("nats: queues not be set for an ordered consumer") - } - // Check for bound consumers. - if consumer != _EMPTY_ { - return nil, fmt.Errorf("nats: can not bind existing consumer for an ordered consumer") - } - // Check for pull mode. - if isPullMode { - return nil, fmt.Errorf("nats: can not use pull mode for an ordered consumer") - } - // Setup how we need it to be here. - o.cfg.FlowControl = true - o.cfg.AckPolicy = AckNonePolicy - o.cfg.MaxDeliver = 1 - o.cfg.AckWait = 22 * time.Hour // Just set to something known, not utilized. - // Force R1 and MemoryStorage for these. - o.cfg.Replicas = 1 - o.cfg.MemoryStorage = true - - if !hasHeartbeats { - o.cfg.Heartbeat = orderedHeartbeatsInterval - } - hasFC, hasHeartbeats = true, true - o.mack = true // To avoid auto-ack wrapping call below. - hbi = o.cfg.Heartbeat - } - - // In case a consumer has not been set explicitly, then the - // durable name will be used as the consumer name. - if consumer == _EMPTY_ { - consumer = o.cfg.Durable - } - - // Find the stream mapped to the subject if not bound to a stream already. - if stream == _EMPTY_ { - stream, err = js.StreamNameBySubject(subj) - if err != nil { - return nil, err - } - } - - // With an explicit durable name, we can lookup the consumer first - // to which it should be attaching to. - // If SkipConsumerLookup was used, do not call consumer info. 
- if consumer != _EMPTY_ && !o.skipCInfo { - info, err = js.ConsumerInfo(stream, consumer) - notFoundErr = errors.Is(err, ErrConsumerNotFound) - lookupErr = err == ErrJetStreamNotEnabled || err == ErrTimeout || err == context.DeadlineExceeded - } - - switch { - case info != nil: - deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) - if err != nil { - return nil, err - } - icfg := &info.Config - hasFC, hbi = icfg.FlowControl, icfg.Heartbeat - hasHeartbeats = hbi > 0 - maxap = icfg.MaxAckPending - case (err != nil && !notFoundErr) || (notFoundErr && consumerBound): - // If the consumer is being bound and we got an error on pull subscribe then allow the error. - if !(isPullMode && lookupErr && consumerBound) { - return nil, err - } - case skipCInfo: - // When skipping consumer info, need to rely on the manually passed sub options - // to match the expected behavior from the subscription. - hasFC, hbi = o.cfg.FlowControl, o.cfg.Heartbeat - hasHeartbeats = hbi > 0 - maxap = o.cfg.MaxAckPending - deliver = o.cfg.DeliverSubject - if consumerBound { - break - } - - // When not bound to a consumer already, proceed to create. - fallthrough - default: - // Attempt to create consumer if not found nor using Bind. - shouldCreate = true - if o.cfg.DeliverSubject != _EMPTY_ { - deliver = o.cfg.DeliverSubject - } else if !isPullMode { - deliver = nc.NewInbox() - cfg.DeliverSubject = deliver - } - // Do filtering always, server will clear as needed. - cfg.FilterSubject = subj - - // Pass the queue to the consumer config - if queue != _EMPTY_ { - cfg.DeliverGroup = queue - } - - // If not set, default to deliver all - if cfg.DeliverPolicy == deliverPolicyNotSet { - cfg.DeliverPolicy = DeliverAllPolicy - } - // If not set, default to ack explicit. 
- if cfg.AckPolicy == ackPolicyNotSet { - cfg.AckPolicy = AckExplicitPolicy - } - // If not set, default to instant - if cfg.ReplayPolicy == replayPolicyNotSet { - cfg.ReplayPolicy = ReplayInstantPolicy - } - - // If we have acks at all and the MaxAckPending is not set go ahead - // and set to the internal max for channel based consumers - if cfg.MaxAckPending == 0 && ch != nil && cfg.AckPolicy != AckNonePolicy { - cfg.MaxAckPending = cap(ch) - } - // Create request here. - ccreq = &createConsumerRequest{ - Stream: stream, - Config: &cfg, - } - hbi = cfg.Heartbeat - } - - if isPullMode { - nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, consumer) - deliver = nc.NewInbox() - // for pull consumers, create a wildcard subscription to differentiate pull requests - deliver += ".*" - } - - // In case this has a context, then create a child context that - // is possible to cancel via unsubscribe / drain. - var cancel func() - if ctx != nil { - ctx, cancel = context.WithCancel(ctx) - } - - jsi := &jsSub{ - js: js, - stream: stream, - consumer: consumer, - deliver: deliver, - hbi: hbi, - ordered: o.ordered, - ccreq: ccreq, - dseq: 1, - pull: isPullMode, - nms: nms, - psubj: subj, - cancel: cancel, - ackNone: o.cfg.AckPolicy == AckNonePolicy, - } - - // Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy - if cb != nil && !o.mack && o.cfg.AckPolicy != AckNonePolicy { - ocb := cb - cb = func(m *Msg) { ocb(m); m.Ack() } - } - sub, err := nc.subscribe(deliver, queue, cb, ch, isSync, jsi) - if err != nil { - return nil, err - } - - // If we fail and we had the sub we need to cleanup, but can't just do a straight Unsubscribe or Drain. - // We need to clear the jsi so we do not remove any durables etc. - cleanUpSub := func() { - if sub != nil { - sub.mu.Lock() - sub.jsi = nil - sub.mu.Unlock() - sub.Unsubscribe() - } - } - - // If we are creating or updating let's process that request. 
- consName := o.cfg.Name - if shouldCreate { - if cfg.Durable != "" { - consName = cfg.Durable - } else if consName == "" { - consName = getHash(nuid.Next()) - } - info, err := js.upsertConsumer(stream, consName, ccreq.Config) - if err != nil { - var apiErr *APIError - if ok := errors.As(err, &apiErr); !ok { - cleanUpSub() - return nil, err - } - if consumer == _EMPTY_ || - (apiErr.ErrorCode != JSErrCodeConsumerAlreadyExists && apiErr.ErrorCode != JSErrCodeConsumerNameExists) { - cleanUpSub() - if errors.Is(apiErr, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, err - } - // We will not be using this sub here if we were push based. - if !isPullMode { - cleanUpSub() - } - - info, err = js.ConsumerInfo(stream, consumer) - if err != nil { - return nil, err - } - deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) - if err != nil { - return nil, err - } - - if !isPullMode { - // We can't reuse the channel, so if one was passed, we need to create a new one. - if isSync { - ch = make(chan *Msg, cap(ch)) - } else if ch != nil { - // User provided (ChanSubscription), simply try to drain it. - for done := false; !done; { - select { - case <-ch: - default: - done = true - } - } - } - jsi.deliver = deliver - jsi.hbi = info.Config.Heartbeat - - // Recreate the subscription here. - sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, isSync, jsi) - if err != nil { - return nil, err - } - hasFC = info.Config.FlowControl - hasHeartbeats = info.Config.Heartbeat > 0 - } - } else { - // Since the library created the JS consumer, it will delete it on Unsubscribe()/Drain() - sub.mu.Lock() - sub.jsi.dc = true - sub.jsi.pending = info.NumPending + info.Delivered.Consumer - // If this is an ephemeral, we did not have a consumer name, we get it from the info - // after the AddConsumer returns. 
- if consumer == _EMPTY_ { - sub.jsi.consumer = info.Name - if isPullMode { - sub.jsi.nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, info.Name) - } - } - sub.mu.Unlock() - } - // Capture max ack pending from the info response here which covers both - // success and failure followed by consumer lookup. - maxap = info.Config.MaxAckPending - } - - // If maxap is greater than the default sub's pending limit, use that. - if maxap > DefaultSubPendingMsgsLimit { - // For bytes limit, use the min of maxp*1MB or DefaultSubPendingBytesLimit - bl := maxap * 1024 * 1024 - if bl < DefaultSubPendingBytesLimit { - bl = DefaultSubPendingBytesLimit - } - sub.SetPendingLimits(maxap, bl) - } - - // Do heartbeats last if needed. - if hasHeartbeats { - sub.scheduleHeartbeatCheck() - } - // For ChanSubscriptions, if we know that there is flow control, we will - // start a go routine that evaluates the number of delivered messages - // and process flow control. - if sub.Type() == ChanSubscription && hasFC { - sub.chanSubcheckForFlowControlResponse() - } - - // Wait for context to get canceled if there is one. - if ctx != nil { - go func() { - <-ctx.Done() - sub.Unsubscribe() - }() - } - - return sub, nil -} - -// InitialConsumerPending returns the number of messages pending to be -// delivered to the consumer when the subscription was created. -func (sub *Subscription) InitialConsumerPending() (uint64, error) { - sub.mu.Lock() - defer sub.mu.Unlock() - if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { - return 0, fmt.Errorf("%w: not a JetStream subscription", ErrTypeSubscription) - } - return sub.jsi.pending, nil -} - -// This long-lived routine is used per ChanSubscription to check -// on the number of delivered messages and check for flow control response. -func (sub *Subscription) chanSubcheckForFlowControlResponse() { - sub.mu.Lock() - // We don't use defer since if we need to send an RC reply, we need - // to do it outside the sub's lock. So doing explicit unlock... 
- if sub.closed { - sub.mu.Unlock() - return - } - var fcReply string - var nc *Conn - - jsi := sub.jsi - if jsi.csfct == nil { - jsi.csfct = time.AfterFunc(chanSubFCCheckInterval, sub.chanSubcheckForFlowControlResponse) - } else { - fcReply = sub.checkForFlowControlResponse() - nc = sub.conn - // Do the reset here under the lock, it's ok... - jsi.csfct.Reset(chanSubFCCheckInterval) - } - sub.mu.Unlock() - // This call will return an error (which we don't care here) - // if nc is nil or fcReply is empty. - nc.Publish(fcReply, nil) -} - -// ErrConsumerSequenceMismatch represents an error from a consumer -// that received a Heartbeat including sequence different to the -// one expected from the view of the client. -type ErrConsumerSequenceMismatch struct { - // StreamResumeSequence is the stream sequence from where the consumer - // should resume consuming from the stream. - StreamResumeSequence uint64 - - // ConsumerSequence is the sequence of the consumer that is behind. - ConsumerSequence uint64 - - // LastConsumerSequence is the sequence of the consumer when the heartbeat - // was received. 
- LastConsumerSequence uint64 -} - -func (ecs *ErrConsumerSequenceMismatch) Error() string { - return fmt.Sprintf("nats: sequence mismatch for consumer at sequence %d (%d sequences behind), should restart consumer from stream sequence %d", - ecs.ConsumerSequence, - ecs.LastConsumerSequence-ecs.ConsumerSequence, - ecs.StreamResumeSequence, - ) -} - -// isJSControlMessage will return true if this is an empty control status message -// and indicate what type of control message it is, say jsCtrlHB or jsCtrlFC -func isJSControlMessage(msg *Msg) (bool, int) { - if len(msg.Data) > 0 || msg.Header.Get(statusHdr) != controlMsg { - return false, 0 - } - val := msg.Header.Get(descrHdr) - if strings.HasPrefix(val, "Idle") { - return true, jsCtrlHB - } - if strings.HasPrefix(val, "Flow") { - return true, jsCtrlFC - } - return true, 0 -} - -// Keeps track of the incoming message's reply subject so that the consumer's -// state (deliver sequence, etc..) can be checked against heartbeats. -// We will also bump the incoming data message sequence that is used in FC cases. -// Runs under the subscription lock -func (sub *Subscription) trackSequences(reply string) { - // For flow control, keep track of incoming message sequence. - sub.jsi.fciseq++ - sub.jsi.cmeta = reply -} - -// Check to make sure messages are arriving in order. -// Returns true if the sub had to be replaced. Will cause upper layers to return. -// The caller has verified that sub.jsi != nil and that this is not a control message. -// Lock should be held. -func (sub *Subscription) checkOrderedMsgs(m *Msg) bool { - // Ignore msgs with no reply like HBs and flow control, they are handled elsewhere. - if m.Reply == _EMPTY_ { - return false - } - - // Normal message here. 
- tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return false - } - sseq, dseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) - - jsi := sub.jsi - if dseq != jsi.dseq { - sub.resetOrderedConsumer(jsi.sseq + 1) - return true - } - // Update our tracking here. - jsi.dseq, jsi.sseq = dseq+1, sseq - return false -} - -// Update and replace sid. -// Lock should be held on entry but will be unlocked to prevent lock inversion. -func (sub *Subscription) applyNewSID() (osid int64) { - nc := sub.conn - sub.mu.Unlock() - - nc.subsMu.Lock() - osid = sub.sid - delete(nc.subs, osid) - // Place new one. - nc.ssid++ - nsid := nc.ssid - nc.subs[nsid] = sub - nc.subsMu.Unlock() - - sub.mu.Lock() - sub.sid = nsid - return osid -} - -// We are here if we have detected a gap with an ordered consumer. -// We will create a new consumer and rewire the low level subscription. -// Lock should be held. -func (sub *Subscription) resetOrderedConsumer(sseq uint64) { - nc := sub.conn - if sub.jsi == nil || nc == nil || sub.closed { - return - } - - var maxStr string - // If there was an AUTO_UNSUB done, we need to adjust the new value - // to send after the SUB for the new sid. - if sub.max > 0 { - if sub.jsi.fciseq < sub.max { - adjustedMax := sub.max - sub.jsi.fciseq - maxStr = strconv.Itoa(int(adjustedMax)) - } else { - // We are already at the max, so we should just unsub the - // existing sub and be done - go func(sid int64) { - nc.mu.Lock() - nc.bw.appendString(fmt.Sprintf(unsubProto, sid, _EMPTY_)) - nc.kickFlusher() - nc.mu.Unlock() - }(sub.sid) - return - } - } - - // Quick unsubscribe. Since we know this is a simple push subscriber we do in place. - osid := sub.applyNewSID() - - // Grab new inbox. - newDeliver := nc.NewInbox() - sub.Subject = newDeliver - - // Snapshot the new sid under sub lock. 
- nsid := sub.sid - - // We are still in the low level readLoop for the connection so we need - // to spin a go routine to try to create the new consumer. - go func() { - // Unsubscribe and subscribe with new inbox and sid. - // Remap a new low level sub into this sub since its client accessible. - // This is done here in this go routine to prevent lock inversion. - nc.mu.Lock() - nc.bw.appendString(fmt.Sprintf(unsubProto, osid, _EMPTY_)) - nc.bw.appendString(fmt.Sprintf(subProto, newDeliver, _EMPTY_, nsid)) - if maxStr != _EMPTY_ { - nc.bw.appendString(fmt.Sprintf(unsubProto, nsid, maxStr)) - } - nc.kickFlusher() - nc.mu.Unlock() - - pushErr := func(err error) { - nc.handleConsumerSequenceMismatch(sub, fmt.Errorf("%w: recreating ordered consumer", err)) - nc.unsubscribe(sub, 0, true) - } - - sub.mu.Lock() - jsi := sub.jsi - // Reset some items in jsi. - jsi.dseq = 1 - jsi.cmeta = _EMPTY_ - jsi.fcr, jsi.fcd = _EMPTY_, 0 - jsi.deliver = newDeliver - // Reset consumer request for starting policy. - cfg := jsi.ccreq.Config - cfg.DeliverSubject = newDeliver - cfg.DeliverPolicy = DeliverByStartSequencePolicy - cfg.OptStartSeq = sseq - // In case the consumer was created with a start time, we need to clear it - // since we are now using a start sequence. 
- cfg.OptStartTime = nil - - js := jsi.js - sub.mu.Unlock() - - consName := nuid.Next() - cinfo, err := js.upsertConsumer(jsi.stream, consName, cfg) - if err != nil { - var apiErr *APIError - if errors.Is(err, ErrJetStreamNotEnabled) || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) { - // if creating consumer failed, retry - return - } else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeInsufficientResourcesErr { - // retry for insufficient resources, as it may mean that client is connected to a running - // server in cluster while the server hosting R1 JetStream resources is restarting - return - } - pushErr(err) - return - } - - sub.mu.Lock() - jsi.consumer = cinfo.Name - sub.mu.Unlock() - }() -} - -// For jetstream subscriptions, returns the number of delivered messages. -// For ChanSubscription, this value is computed based on the known number -// of messages added to the channel minus the current size of that channel. -// Lock held on entry -func (sub *Subscription) getJSDelivered() uint64 { - if sub.typ == ChanSubscription { - return sub.jsi.fciseq - uint64(len(sub.mch)) - } - return sub.delivered -} - -// checkForFlowControlResponse will check to see if we should send a flow control response -// based on the subscription current delivered index and the target. -// Runs under subscription lock -func (sub *Subscription) checkForFlowControlResponse() string { - // Caller has verified that there is a sub.jsi and fc - jsi := sub.jsi - jsi.active = true - if sub.getJSDelivered() >= jsi.fcd { - fcr := jsi.fcr - jsi.fcr, jsi.fcd = _EMPTY_, 0 - return fcr - } - return _EMPTY_ -} - -// Record an inbound flow control message. -// Runs under subscription lock -func (sub *Subscription) scheduleFlowControlResponse(reply string) { - sub.jsi.fcr, sub.jsi.fcd = reply, sub.jsi.fciseq -} - -// Checks for activity from our consumer. -// If we do not think we are active send an async error. 
-func (sub *Subscription) activityCheck() { - sub.mu.Lock() - jsi := sub.jsi - if jsi == nil || sub.closed { - sub.mu.Unlock() - return - } - - active := jsi.active - jsi.hbc.Reset(jsi.hbi * hbcThresh) - jsi.active = false - nc := sub.conn - sub.mu.Unlock() - - if !active { - if !jsi.ordered || nc.Status() != CONNECTED { - nc.mu.Lock() - if errCB := nc.Opts.AsyncErrorCB; errCB != nil { - nc.ach.push(func() { errCB(nc, sub, ErrConsumerNotActive) }) - } - nc.mu.Unlock() - return - } - sub.mu.Lock() - sub.resetOrderedConsumer(jsi.sseq + 1) - sub.mu.Unlock() - } -} - -// scheduleHeartbeatCheck sets up the timer check to make sure we are active -// or receiving idle heartbeats.. -func (sub *Subscription) scheduleHeartbeatCheck() { - sub.mu.Lock() - defer sub.mu.Unlock() - - jsi := sub.jsi - if jsi == nil { - return - } - - if jsi.hbc == nil { - jsi.hbc = time.AfterFunc(jsi.hbi*hbcThresh, sub.activityCheck) - } else { - jsi.hbc.Reset(jsi.hbi * hbcThresh) - } -} - -// handleConsumerSequenceMismatch will send an async error that can be used to restart a push based consumer. -func (nc *Conn) handleConsumerSequenceMismatch(sub *Subscription, err error) { - nc.mu.Lock() - errCB := nc.Opts.AsyncErrorCB - if errCB != nil { - nc.ach.push(func() { errCB(nc, sub, err) }) - } - nc.mu.Unlock() -} - -// checkForSequenceMismatch will make sure we have not missed any messages since last seen. -func (nc *Conn) checkForSequenceMismatch(msg *Msg, s *Subscription, jsi *jsSub) { - // Process heartbeat received, get latest control metadata if present. - s.mu.Lock() - ctrl, ordered := jsi.cmeta, jsi.ordered - jsi.active = true - s.mu.Unlock() - - if ctrl == _EMPTY_ { - return - } - - tokens, err := parser.GetMetadataFields(ctrl) - if err != nil { - return - } - - // Consumer sequence. 
- var ldseq string - dseq := tokens[parser.AckConsumerSeqTokenPos] - hdr := msg.Header[lastConsumerSeqHdr] - if len(hdr) == 1 { - ldseq = hdr[0] - } - - // Detect consumer sequence mismatch and whether - // should restart the consumer. - if ldseq != dseq { - // Dispatch async error including details such as - // from where the consumer could be restarted. - sseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) - if ordered { - s.mu.Lock() - s.resetOrderedConsumer(jsi.sseq + 1) - s.mu.Unlock() - } else { - ecs := &ErrConsumerSequenceMismatch{ - StreamResumeSequence: uint64(sseq), - ConsumerSequence: parser.ParseNum(dseq), - LastConsumerSequence: parser.ParseNum(ldseq), - } - nc.handleConsumerSequenceMismatch(s, ecs) - } - } -} - -type streamRequest struct { - Subject string `json:"subject,omitempty"` -} - -type streamNamesResponse struct { - apiResponse - apiPaged - Streams []string `json:"streams"` -} - -type subOpts struct { - // For attaching. - stream, consumer string - // For creating or updating. - cfg *ConsumerConfig - // For binding a subscription to a consumer without creating it. - bound bool - // For manual ack - mack bool - // For an ordered consumer. - ordered bool - ctx context.Context - - // To disable calling ConsumerInfo - skipCInfo bool -} - -// SkipConsumerLookup will omit looking up consumer when [Bind], [Durable] -// or [ConsumerName] are provided. -// -// NOTE: This setting may cause an existing consumer to be overwritten. Also, -// because consumer lookup is skipped, all consumer options like AckPolicy, -// DeliverSubject etc. need to be provided even if consumer already exists. -func SkipConsumerLookup() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.skipCInfo = true - return nil - }) -} - -// OrderedConsumer will create a FIFO direct/ephemeral consumer for in order delivery of messages. 
-// There are no redeliveries and no acks, and flow control and heartbeats will be added but -// will be taken care of without additional client code. -func OrderedConsumer() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.ordered = true - return nil - }) -} - -// ManualAck disables auto ack functionality for async subscriptions. -func ManualAck() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.mack = true - return nil - }) -} - -// Description will set the description for the created consumer. -func Description(description string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.Description = description - return nil - }) -} - -// Durable defines the consumer name for JetStream durable subscribers. -// This function will return ErrInvalidConsumerName if the name contains -// any dot ".". -func Durable(consumer string) SubOpt { - return subOptFn(func(opts *subOpts) error { - if opts.cfg.Durable != _EMPTY_ { - return fmt.Errorf("nats: option Durable set more than once") - } - if opts.consumer != _EMPTY_ && opts.consumer != consumer { - return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer) - } - if err := checkConsumerName(consumer); err != nil { - return err - } - - opts.cfg.Durable = consumer - return nil - }) -} - -// DeliverAll will configure a Consumer to receive all the -// messages from a Stream. -func DeliverAll() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverAllPolicy - return nil - }) -} - -// DeliverLast configures a Consumer to receive messages -// starting with the latest one. -func DeliverLast() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverLastPolicy - return nil - }) -} - -// DeliverLastPerSubject configures a Consumer to receive messages -// starting with the latest one for each filtered subject. 
-func DeliverLastPerSubject() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverLastPerSubjectPolicy - return nil - }) -} - -// DeliverNew configures a Consumer to receive messages -// published after the subscription. -func DeliverNew() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverNewPolicy - return nil - }) -} - -// StartSequence configures a Consumer to receive -// messages from a start sequence. -func StartSequence(seq uint64) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverByStartSequencePolicy - opts.cfg.OptStartSeq = seq - return nil - }) -} - -// StartTime configures a Consumer to receive -// messages from a start time. -func StartTime(startTime time.Time) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverByStartTimePolicy - opts.cfg.OptStartTime = &startTime - return nil - }) -} - -// AckNone requires no acks for delivered messages. -func AckNone() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.AckPolicy = AckNonePolicy - return nil - }) -} - -// AckAll when acking a sequence number, this implicitly acks all sequences -// below this one as well. -func AckAll() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.AckPolicy = AckAllPolicy - return nil - }) -} - -// AckExplicit requires ack or nack for all messages. -func AckExplicit() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.AckPolicy = AckExplicitPolicy - return nil - }) -} - -// MaxDeliver sets the number of redeliveries for a message. -func MaxDeliver(n int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxDeliver = n - return nil - }) -} - -// MaxAckPending sets the number of outstanding acks that are allowed before -// message delivery is halted. 
-func MaxAckPending(n int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxAckPending = n - return nil - }) -} - -// ReplayOriginal replays the messages at the original speed. -func ReplayOriginal() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.ReplayPolicy = ReplayOriginalPolicy - return nil - }) -} - -// ReplayInstant replays the messages as fast as possible. -func ReplayInstant() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.ReplayPolicy = ReplayInstantPolicy - return nil - }) -} - -// RateLimit is the Bits per sec rate limit applied to a push consumer. -func RateLimit(n uint64) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.RateLimit = n - return nil - }) -} - -// BackOff is an array of time durations that represent the time to delay based on delivery count. -func BackOff(backOff []time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.BackOff = backOff - return nil - }) -} - -// BindStream binds a consumer to a stream explicitly based on a name. -// When a stream name is not specified, the library uses the subscribe -// subject as a way to find the stream name. It is done by making a request -// to the server to get list of stream names that have a filter for this -// subject. If the returned list contains a single stream, then this -// stream name will be used, otherwise the `ErrNoMatchingStream` is returned. -// To avoid the stream lookup, provide the stream name with this function. -// See also `Bind()`. -func BindStream(stream string) SubOpt { - return subOptFn(func(opts *subOpts) error { - if opts.stream != _EMPTY_ && opts.stream != stream { - return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) - } - - opts.stream = stream - return nil - }) -} - -// Bind binds a subscription to an existing consumer from a stream without attempting to create. 
-// The first argument is the stream name and the second argument will be the consumer name. -func Bind(stream, consumer string) SubOpt { - return subOptFn(func(opts *subOpts) error { - if stream == _EMPTY_ { - return ErrStreamNameRequired - } - if consumer == _EMPTY_ { - return ErrConsumerNameRequired - } - - // In case of pull subscribers, the durable name is a required parameter - // so check that they are not different. - if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer { - return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer) - } - if opts.stream != _EMPTY_ && opts.stream != stream { - return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) - } - opts.stream = stream - opts.consumer = consumer - opts.bound = true - return nil - }) -} - -// EnableFlowControl enables flow control for a push based consumer. -func EnableFlowControl() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.FlowControl = true - return nil - }) -} - -// IdleHeartbeat enables push based consumers to have idle heartbeats delivered. -// For pull consumers, idle heartbeat has to be set on each [Fetch] call. -func IdleHeartbeat(duration time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.Heartbeat = duration - return nil - }) -} - -// DeliverSubject specifies the JetStream consumer deliver subject. -// -// This option is used only in situations where the consumer does not exist -// and a creation request is sent to the server. If not provided, an inbox -// will be selected. -// If a consumer exists, then the NATS subscription will be created on -// the JetStream consumer's DeliverSubject, not necessarily this subject. -func DeliverSubject(subject string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverSubject = subject - return nil - }) -} - -// HeadersOnly() will instruct the consumer to only deliver headers and no payloads. 
-func HeadersOnly() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.HeadersOnly = true - return nil - }) -} - -// MaxRequestBatch sets the maximum pull consumer batch size that a Fetch() -// can request. -func MaxRequestBatch(max int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxRequestBatch = max - return nil - }) -} - -// MaxRequestExpires sets the maximum pull consumer request expiration that a -// Fetch() can request (using the Fetch's timeout value). -func MaxRequestExpires(max time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxRequestExpires = max - return nil - }) -} - -// MaxRequesMaxBytes sets the maximum pull consumer request bytes that a -// Fetch() can receive. -func MaxRequestMaxBytes(bytes int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxRequestMaxBytes = bytes - return nil - }) -} - -// InactiveThreshold indicates how long the server should keep a consumer -// after detecting a lack of activity. In NATS Server 2.8.4 and earlier, this -// option only applies to ephemeral consumers. In NATS Server 2.9.0 and later, -// this option applies to both ephemeral and durable consumers, allowing durable -// consumers to also be deleted automatically after the inactivity threshold has -// passed. -func InactiveThreshold(threshold time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - if threshold < 0 { - return fmt.Errorf("invalid InactiveThreshold value (%v), needs to be greater or equal to 0", threshold) - } - opts.cfg.InactiveThreshold = threshold - return nil - }) -} - -// ConsumerReplicas sets the number of replica count for a consumer. 
-func ConsumerReplicas(replicas int) SubOpt { - return subOptFn(func(opts *subOpts) error { - if replicas < 1 { - return fmt.Errorf("invalid ConsumerReplicas value (%v), needs to be greater than 0", replicas) - } - opts.cfg.Replicas = replicas - return nil - }) -} - -// ConsumerMemoryStorage sets the memory storage to true for a consumer. -func ConsumerMemoryStorage() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MemoryStorage = true - return nil - }) -} - -// ConsumerName sets the name for a consumer. -func ConsumerName(name string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.Name = name - return nil - }) -} - -// ConsumerFilterSubjects can be used to set multiple subject filters on the consumer. -// It has to be used in conjunction with [nats.BindStream] and -// with empty 'subject' parameter. -func ConsumerFilterSubjects(subjects ...string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.FilterSubjects = subjects - return nil - }) -} - -func (sub *Subscription) ConsumerInfo() (*ConsumerInfo, error) { - sub.mu.Lock() - // TODO(dlc) - Better way to mark especially if we attach. - if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { - sub.mu.Unlock() - return nil, ErrTypeSubscription - } - - // Consumer info lookup should fail if in direct mode. - js := sub.jsi.js - stream, consumer := sub.jsi.stream, sub.jsi.consumer - sub.mu.Unlock() - - return js.getConsumerInfo(stream, consumer) -} - -type pullOpts struct { - maxBytes int - ttl time.Duration - ctx context.Context - hb time.Duration -} - -// PullOpt are the options that can be passed when pulling a batch of messages. -type PullOpt interface { - configurePull(opts *pullOpts) error -} - -// PullMaxWaiting defines the max inflight pull requests. 
-func PullMaxWaiting(n int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxWaiting = n - return nil - }) -} - -type PullHeartbeat time.Duration - -func (h PullHeartbeat) configurePull(opts *pullOpts) error { - if h <= 0 { - return fmt.Errorf("%w: idle heartbeat has to be greater than 0", ErrInvalidArg) - } - opts.hb = time.Duration(h) - return nil -} - -// PullMaxBytes defines the max bytes allowed for a fetch request. -type PullMaxBytes int - -func (n PullMaxBytes) configurePull(opts *pullOpts) error { - opts.maxBytes = int(n) - return nil -} - -var ( - // errNoMessages is an error that a Fetch request using no_wait can receive to signal - // that there are no more messages available. - errNoMessages = errors.New("nats: no messages") - - // errRequestsPending is an error that represents a sub.Fetch requests that was using - // no_wait and expires time got discarded by the server. - errRequestsPending = errors.New("nats: requests pending") -) - -// Returns if the given message is a user message or not, and if -// `checkSts` is true, returns appropriate error based on the -// content of the status (404, etc..) -func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) { - // Assume user message - usrMsg = true - - // If payload or no header, consider this a user message - if len(msg.Data) > 0 || len(msg.Header) == 0 { - return - } - // Look for status header - val := msg.Header.Get(statusHdr) - // If not present, then this is considered a user message - if val == _EMPTY_ { - return - } - // At this point, this is not a user message since there is - // no payload and a "Status" header. - usrMsg = false - - // If we don't care about status, we are done. - if !checkSts { - return - } - - // if it's a heartbeat message, report as not user msg - if isHb, _ := isJSControlMessage(msg); isHb { - return - } - switch val { - case noResponders: - err = ErrNoResponders - case noMessagesSts: - // 404 indicates that there are no messages. 
- err = errNoMessages - case reqTimeoutSts: - // In case of a fetch request with no wait request and expires time, - // need to skip 408 errors and retry. - if isNoWait { - err = errRequestsPending - } else { - // Older servers may send a 408 when a request in the server was expired - // and interest is still found, which will be the case for our - // implementation. Regardless, ignore 408 errors until receiving at least - // one message when making requests without no_wait. - err = ErrTimeout - } - case jetStream409Sts: - if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "consumer deleted") { - err = ErrConsumerDeleted - break - } - - if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "leadership change") { - err = ErrConsumerLeadershipChanged - break - } - fallthrough - default: - err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr)) - } - return -} - -// Fetch pulls a batch of messages from a stream for a pull consumer. -func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) { - if sub == nil { - return nil, ErrBadSubscription - } - if batch < 1 { - return nil, ErrInvalidArg - } - - var o pullOpts - for _, opt := range opts { - if err := opt.configurePull(&o); err != nil { - return nil, err - } - } - if o.ctx != nil && o.ttl != 0 { - return nil, ErrContextAndTimeout - } - - sub.mu.Lock() - jsi := sub.jsi - // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, - // so check for jsi.pull boolean instead. - if jsi == nil || !jsi.pull { - sub.mu.Unlock() - return nil, ErrTypeSubscription - } - - nc := sub.conn - nms := sub.jsi.nms - rply, _ := newFetchInbox(jsi.deliver) - js := sub.jsi.js - pmc := len(sub.mch) > 0 - - // All fetch requests have an expiration, in case of no explicit expiration - // then the default timeout of the JetStream context is used. 
- ttl := o.ttl - if ttl == 0 { - ttl = js.opts.wait - } - sub.mu.Unlock() - - // Use the given context or setup a default one for the span - // of the pull batch request. - var ( - ctx = o.ctx - err error - cancel context.CancelFunc - ) - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), ttl) - } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { - // Prevent from passing the background context which will just block - // and cannot be canceled either. - if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { - return nil, ErrNoDeadlineContext - } - - // If the context did not have a deadline, then create a new child context - // that will use the default timeout from the JS context. - ctx, cancel = context.WithTimeout(ctx, ttl) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - - // if heartbeat is set, validate it against the context timeout - if o.hb > 0 { - deadline, _ := ctx.Deadline() - if 2*o.hb >= time.Until(deadline) { - return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg) - } - } - - // Check if context not done already before making the request. - select { - case <-ctx.Done(): - if o.ctx != nil { // Timeout or Cancel triggered by context object option - err = ctx.Err() - } else { // Timeout triggered by timeout option - err = ErrTimeout - } - default: - } - if err != nil { - return nil, err - } - - var ( - msgs = make([]*Msg, 0, batch) - msg *Msg - ) - for pmc && len(msgs) < batch { - // Check next msg with booleans that say that this is an internal call - // for a pull subscribe (so don't reject it) and don't wait if there - // are no messages. 
- msg, err = sub.nextMsgWithContext(ctx, true, false) - if err != nil { - if err == errNoMessages { - err = nil - } - break - } - // Check msg but just to determine if this is a user message - // or status message, however, we don't care about values of status - // messages at this point in the Fetch() call, so checkMsg can't - // return an error. - if usrMsg, _ := checkMsg(msg, false, false); usrMsg { - msgs = append(msgs, msg) - } - } - var hbTimer *time.Timer - var hbErr error - if err == nil && len(msgs) < batch { - // For batch real size of 1, it does not make sense to set no_wait in - // the request. - noWait := batch-len(msgs) > 1 - - var nr nextRequest - - sendReq := func() error { - // The current deadline for the context will be used - // to set the expires TTL for a fetch request. - deadline, _ := ctx.Deadline() - ttl = time.Until(deadline) - - // Check if context has already been canceled or expired. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Make our request expiration a bit shorter than the current timeout. 
- expires := ttl - if ttl >= 20*time.Millisecond { - expires = ttl - 10*time.Millisecond - } - - nr.Batch = batch - len(msgs) - nr.Expires = expires - nr.NoWait = noWait - nr.MaxBytes = o.maxBytes - if 2*o.hb < expires { - nr.Heartbeat = o.hb - } else { - nr.Heartbeat = 0 - } - req, _ := json.Marshal(nr) - if err := nc.PublishRequest(nms, rply, req); err != nil { - return err - } - if o.hb > 0 { - if hbTimer == nil { - hbTimer = time.AfterFunc(2*o.hb, func() { - hbErr = ErrNoHeartbeat - cancel() - }) - } else { - hbTimer.Reset(2 * o.hb) - } - } - return nil - } - - err = sendReq() - for err == nil && len(msgs) < batch { - // Ask for next message and wait if there are no messages - msg, err = sub.nextMsgWithContext(ctx, true, true) - if err == nil { - if hbTimer != nil { - hbTimer.Reset(2 * o.hb) - } - var usrMsg bool - - usrMsg, err = checkMsg(msg, true, noWait) - if err == nil && usrMsg { - msgs = append(msgs, msg) - } else if noWait && (err == errNoMessages || err == errRequestsPending) && len(msgs) == 0 { - // If we have a 404/408 for our "no_wait" request and have - // not collected any message, then resend request to - // wait this time. - noWait = false - err = sendReq() - } else if err == ErrTimeout && len(msgs) == 0 { - // If we get a 408, we will bail if we already collected some - // messages, otherwise ignore and go back calling nextMsg. - err = nil - } - } - } - if hbTimer != nil { - hbTimer.Stop() - } - } - // If there is at least a message added to msgs, then need to return OK and no error - if err != nil && len(msgs) == 0 { - if hbErr != nil { - return nil, hbErr - } - return nil, o.checkCtxErr(err) - } - return msgs, nil -} - -// newFetchInbox returns subject used as reply subject when sending pull requests -// as well as request ID. 
For non-wildcard subject, request ID is empty and -// passed subject is not transformed -func newFetchInbox(subj string) (string, string) { - if !strings.HasSuffix(subj, ".*") { - return subj, "" - } - reqID := nuid.Next() - var sb strings.Builder - sb.WriteString(subj[:len(subj)-1]) - sb.WriteString(reqID) - return sb.String(), reqID -} - -func subjectMatchesReqID(subject, reqID string) bool { - subjectParts := strings.Split(subject, ".") - if len(subjectParts) < 2 { - return false - } - return subjectParts[len(subjectParts)-1] == reqID -} - -// MessageBatch provides methods to retrieve messages consumed using [Subscribe.FetchBatch]. -type MessageBatch interface { - // Messages returns a channel on which messages will be published. - Messages() <-chan *Msg - - // Error returns an error encountered when fetching messages. - Error() error - - // Done signals end of execution. - Done() <-chan struct{} -} - -type messageBatch struct { - msgs chan *Msg - err error - done chan struct{} -} - -func (mb *messageBatch) Messages() <-chan *Msg { - return mb.msgs -} - -func (mb *messageBatch) Error() error { - return mb.err -} - -func (mb *messageBatch) Done() <-chan struct{} { - return mb.done -} - -// FetchBatch pulls a batch of messages from a stream for a pull consumer. -// Unlike [Subscription.Fetch], it is non blocking and returns [MessageBatch], -// allowing to retrieve incoming messages from a channel. -// The returned channel is always closed after all messages for a batch have been -// delivered by the server - it is safe to iterate over it using range. -// -// To avoid using default JetStream timeout as fetch expiry time, use [nats.MaxWait] -// or [nats.Context] (with deadline set). -// -// This method will not return error in case of pull request expiry (even if there are no messages). -// Any other error encountered when receiving messages will cause FetchBatch to stop receiving new messages. 
-func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, error) { - if sub == nil { - return nil, ErrBadSubscription - } - if batch < 1 { - return nil, ErrInvalidArg - } - - var o pullOpts - for _, opt := range opts { - if err := opt.configurePull(&o); err != nil { - return nil, err - } - } - if o.ctx != nil && o.ttl != 0 { - return nil, ErrContextAndTimeout - } - sub.mu.Lock() - jsi := sub.jsi - // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, - // so check for jsi.pull boolean instead. - if jsi == nil || !jsi.pull { - sub.mu.Unlock() - return nil, ErrTypeSubscription - } - - nc := sub.conn - nms := sub.jsi.nms - rply, reqID := newFetchInbox(sub.jsi.deliver) - js := sub.jsi.js - pmc := len(sub.mch) > 0 - - // All fetch requests have an expiration, in case of no explicit expiration - // then the default timeout of the JetStream context is used. - ttl := o.ttl - if ttl == 0 { - ttl = js.opts.wait - } - sub.mu.Unlock() - - // Use the given context or setup a default one for the span - // of the pull batch request. - var ( - ctx = o.ctx - cancel context.CancelFunc - cancelContext = true - ) - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), ttl) - } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { - // Prevent from passing the background context which will just block - // and cannot be canceled either. - if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { - return nil, ErrNoDeadlineContext - } - - // If the context did not have a deadline, then create a new child context - // that will use the default timeout from the JS context. 
- ctx, cancel = context.WithTimeout(ctx, ttl) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer func() { - // only cancel the context here if we are sure the fetching goroutine has not been started yet - if cancelContext { - cancel() - } - }() - - // if heartbeat is set, validate it against the context timeout - if o.hb > 0 { - deadline, _ := ctx.Deadline() - if 2*o.hb >= time.Until(deadline) { - return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg) - } - } - - // Check if context not done already before making the request. - select { - case <-ctx.Done(): - if o.ctx != nil { // Timeout or Cancel triggered by context object option - return nil, ctx.Err() - } else { // Timeout triggered by timeout option - return nil, ErrTimeout - } - default: - } - - result := &messageBatch{ - msgs: make(chan *Msg, batch), - done: make(chan struct{}, 1), - } - var msg *Msg - for pmc && len(result.msgs) < batch { - // Check next msg with booleans that say that this is an internal call - // for a pull subscribe (so don't reject it) and don't wait if there - // are no messages. - msg, err := sub.nextMsgWithContext(ctx, true, false) - if err != nil { - if err == errNoMessages { - err = nil - } - result.err = err - break - } - // Check msg but just to determine if this is a user message - // or status message, however, we don't care about values of status - // messages at this point in the Fetch() call, so checkMsg can't - // return an error. - if usrMsg, _ := checkMsg(msg, false, false); usrMsg { - result.msgs <- msg - } - } - if len(result.msgs) == batch || result.err != nil { - close(result.msgs) - result.done <- struct{}{} - return result, nil - } - - deadline, _ := ctx.Deadline() - ttl = time.Until(deadline) - - // Make our request expiration a bit shorter than the current timeout. 
- expires := ttl - if ttl >= 20*time.Millisecond { - expires = ttl - 10*time.Millisecond - } - - requestBatch := batch - len(result.msgs) - req := nextRequest{ - Expires: expires, - Batch: requestBatch, - MaxBytes: o.maxBytes, - Heartbeat: o.hb, - } - reqJSON, err := json.Marshal(req) - if err != nil { - close(result.msgs) - result.done <- struct{}{} - result.err = err - return result, nil - } - if err := nc.PublishRequest(nms, rply, reqJSON); err != nil { - if len(result.msgs) == 0 { - return nil, err - } - close(result.msgs) - result.done <- struct{}{} - result.err = err - return result, nil - } - var hbTimer *time.Timer - var hbErr error - if o.hb > 0 { - hbTimer = time.AfterFunc(2*o.hb, func() { - hbErr = ErrNoHeartbeat - cancel() - }) - } - cancelContext = false - go func() { - defer cancel() - var requestMsgs int - for requestMsgs < requestBatch { - // Ask for next message and wait if there are no messages - msg, err = sub.nextMsgWithContext(ctx, true, true) - if err != nil { - break - } - if hbTimer != nil { - hbTimer.Reset(2 * o.hb) - } - var usrMsg bool - - usrMsg, err = checkMsg(msg, true, false) - if err != nil { - if err == ErrTimeout { - if reqID != "" && !subjectMatchesReqID(msg.Subject, reqID) { - // ignore timeout message from server if it comes from a different pull request - continue - } - err = nil - } - break - } - if usrMsg { - result.msgs <- msg - requestMsgs++ - } - } - if err != nil { - if hbErr != nil { - result.err = hbErr - } else { - result.err = o.checkCtxErr(err) - } - } - close(result.msgs) - result.done <- struct{}{} - }() - return result, nil -} - -// checkCtxErr is used to determine whether ErrTimeout should be returned in case of context timeout -func (o *pullOpts) checkCtxErr(err error) error { - if o.ctx == nil && err == context.DeadlineExceeded { - return ErrTimeout - } - return err -} - -func (js *js) getConsumerInfo(stream, consumer string) (*ConsumerInfo, error) { - ctx, cancel := context.WithTimeout(context.Background(), 
js.opts.wait) - defer cancel() - return js.getConsumerInfoContext(ctx, stream, consumer) -} - -func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer string) (*ConsumerInfo, error) { - ccInfoSubj := fmt.Sprintf(apiConsumerInfoT, stream, consumer) - resp, err := js.apiRequestWithContext(ctx, js.apiSubj(ccInfoSubj), nil) - if err != nil { - if err == ErrNoResponders { - err = ErrJetStreamNotEnabled - } - return nil, err - } - - var info consumerResponse - if err := json.Unmarshal(resp.Data, &info); err != nil { - return nil, err - } - if info.Error != nil { - if errors.Is(info.Error, ErrConsumerNotFound) { - return nil, ErrConsumerNotFound - } - if errors.Is(info.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, info.Error - } - return info.ConsumerInfo, nil -} - -// a RequestWithContext with tracing via TraceCB -func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { - if js.opts.shouldTrace { - ctrace := js.opts.ctrace - if ctrace.RequestSent != nil { - ctrace.RequestSent(subj, data) - } - } - resp, err := js.nc.RequestWithContext(ctx, subj, data) - if err != nil { - return nil, err - } - if js.opts.shouldTrace { - ctrace := js.opts.ctrace - if ctrace.RequestSent != nil { - ctrace.ResponseReceived(subj, resp.Data, resp.Header) - } - } - - return resp, nil -} - -func (m *Msg) checkReply() error { - if m == nil || m.Sub == nil { - return ErrMsgNotBound - } - if m.Reply == _EMPTY_ { - return ErrMsgNoReply - } - return nil -} - -// ackReply handles all acks. Will do the right thing for pull and sync mode. -// It ensures that an ack is only sent a single time, regardless of -// how many times it is being called to avoid duplicated acks. 
-func (m *Msg) ackReply(ackType []byte, sync bool, opts ...AckOpt) error { - var o ackOpts - for _, opt := range opts { - if err := opt.configureAck(&o); err != nil { - return err - } - } - - if err := m.checkReply(); err != nil { - return err - } - - var ackNone bool - var js *js - - sub := m.Sub - sub.mu.Lock() - nc := sub.conn - if jsi := sub.jsi; jsi != nil { - js = jsi.js - ackNone = jsi.ackNone - } - sub.mu.Unlock() - - // Skip if already acked. - if atomic.LoadUint32(&m.ackd) == 1 { - return ErrMsgAlreadyAckd - } - if ackNone { - return ErrCantAckIfConsumerAckNone - } - - usesCtx := o.ctx != nil - usesWait := o.ttl > 0 - - // Only allow either AckWait or Context option to set the timeout. - if usesWait && usesCtx { - return ErrContextAndTimeout - } - - sync = sync || usesCtx || usesWait - ctx := o.ctx - wait := defaultRequestWait - if usesWait { - wait = o.ttl - } else if js != nil { - wait = js.opts.wait - } - - var body []byte - var err error - // This will be > 0 only when called from NakWithDelay() - if o.nakDelay > 0 { - body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, o.nakDelay.Nanoseconds())) - } else { - body = ackType - } - - if sync { - if usesCtx { - _, err = nc.RequestWithContext(ctx, m.Reply, body) - } else { - _, err = nc.Request(m.Reply, body, wait) - } - } else { - err = nc.Publish(m.Reply, body) - } - - // Mark that the message has been acked unless it is ackProgress - // which can be sent many times. - if err == nil && !bytes.Equal(ackType, ackProgress) { - atomic.StoreUint32(&m.ackd, 1) - } - - return err -} - -// Ack acknowledges a message. This tells the server that the message was -// successfully processed and it can move on to the next message. -func (m *Msg) Ack(opts ...AckOpt) error { - return m.ackReply(ackAck, false, opts...) -} - -// AckSync is the synchronous version of Ack. This indicates successful message -// processing. -func (m *Msg) AckSync(opts ...AckOpt) error { - return m.ackReply(ackAck, true, opts...) 
-} - -// Nak negatively acknowledges a message. This tells the server to redeliver -// the message. You can configure the number of redeliveries by passing -// nats.MaxDeliver when you Subscribe. The default is infinite redeliveries. -func (m *Msg) Nak(opts ...AckOpt) error { - return m.ackReply(ackNak, false, opts...) -} - -// Nak negatively acknowledges a message. This tells the server to redeliver -// the message after the give `delay` duration. You can configure the number -// of redeliveries by passing nats.MaxDeliver when you Subscribe. -// The default is infinite redeliveries. -func (m *Msg) NakWithDelay(delay time.Duration, opts ...AckOpt) error { - if delay > 0 { - opts = append(opts, nakDelay(delay)) - } - return m.ackReply(ackNak, false, opts...) -} - -// Term tells the server to not redeliver this message, regardless of the value -// of nats.MaxDeliver. -func (m *Msg) Term(opts ...AckOpt) error { - return m.ackReply(ackTerm, false, opts...) -} - -// InProgress tells the server that this message is being worked on. It resets -// the redelivery timer on the server. -func (m *Msg) InProgress(opts ...AckOpt) error { - return m.ackReply(ackProgress, false, opts...) -} - -// MsgMetadata is the JetStream metadata associated with received messages. -type MsgMetadata struct { - Sequence SequencePair - NumDelivered uint64 - NumPending uint64 - Timestamp time.Time - Stream string - Consumer string - Domain string -} - -// Metadata retrieves the metadata from a JetStream message. This method will -// return an error for non-JetStream Msgs. 
-func (m *Msg) Metadata() (*MsgMetadata, error) { - if err := m.checkReply(); err != nil { - return nil, err - } - - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return nil, err - } - - meta := &MsgMetadata{ - Domain: tokens[parser.AckDomainTokenPos], - NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]), - NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]), - Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), - Stream: tokens[parser.AckStreamTokenPos], - Consumer: tokens[parser.AckConsumerTokenPos], - } - meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) - meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) - return meta, nil -} - -// AckPolicy determines how the consumer should acknowledge delivered messages. -type AckPolicy int - -const ( - // AckNonePolicy requires no acks for delivered messages. - AckNonePolicy AckPolicy = iota - - // AckAllPolicy when acking a sequence number, this implicitly acks all - // sequences below this one as well. - AckAllPolicy - - // AckExplicitPolicy requires ack or nack for all messages. 
- AckExplicitPolicy - - // For configuration mismatch check - ackPolicyNotSet = 99 -) - -func jsonString(s string) string { - return "\"" + s + "\"" -} - -func (p *AckPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("none"): - *p = AckNonePolicy - case jsonString("all"): - *p = AckAllPolicy - case jsonString("explicit"): - *p = AckExplicitPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - - return nil -} - -func (p AckPolicy) MarshalJSON() ([]byte, error) { - switch p { - case AckNonePolicy: - return json.Marshal("none") - case AckAllPolicy: - return json.Marshal("all") - case AckExplicitPolicy: - return json.Marshal("explicit") - default: - return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) - } -} - -func (p AckPolicy) String() string { - switch p { - case AckNonePolicy: - return "AckNone" - case AckAllPolicy: - return "AckAll" - case AckExplicitPolicy: - return "AckExplicit" - case ackPolicyNotSet: - return "Not Initialized" - default: - return "Unknown AckPolicy" - } -} - -// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream. -type ReplayPolicy int - -const ( - // ReplayInstantPolicy will replay messages as fast as possible. - ReplayInstantPolicy ReplayPolicy = iota - - // ReplayOriginalPolicy will maintain the same timing as the messages were received. 
- ReplayOriginalPolicy - - // For configuration mismatch check - replayPolicyNotSet = 99 -) - -func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("instant"): - *p = ReplayInstantPolicy - case jsonString("original"): - *p = ReplayOriginalPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - - return nil -} - -func (p ReplayPolicy) MarshalJSON() ([]byte, error) { - switch p { - case ReplayOriginalPolicy: - return json.Marshal("original") - case ReplayInstantPolicy: - return json.Marshal("instant") - default: - return nil, fmt.Errorf("nats: unknown replay policy %v", p) - } -} - -var ( - ackAck = []byte("+ACK") - ackNak = []byte("-NAK") - ackProgress = []byte("+WPI") - ackTerm = []byte("+TERM") -) - -// DeliverPolicy determines how the consumer should select the first message to deliver. -type DeliverPolicy int - -const ( - // DeliverAllPolicy starts delivering messages from the very beginning of a - // stream. This is the default. - DeliverAllPolicy DeliverPolicy = iota - - // DeliverLastPolicy will start the consumer with the last sequence - // received. - DeliverLastPolicy - - // DeliverNewPolicy will only deliver new messages that are sent after the - // consumer is created. - DeliverNewPolicy - - // DeliverByStartSequencePolicy will deliver messages starting from a given - // sequence. - DeliverByStartSequencePolicy - - // DeliverByStartTimePolicy will deliver messages starting from a given - // time. - DeliverByStartTimePolicy - - // DeliverLastPerSubjectPolicy will start the consumer with the last message - // for all subjects received. 
- DeliverLastPerSubjectPolicy - - // For configuration mismatch check - deliverPolicyNotSet = 99 -) - -func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("all"), jsonString("undefined"): - *p = DeliverAllPolicy - case jsonString("last"): - *p = DeliverLastPolicy - case jsonString("new"): - *p = DeliverNewPolicy - case jsonString("by_start_sequence"): - *p = DeliverByStartSequencePolicy - case jsonString("by_start_time"): - *p = DeliverByStartTimePolicy - case jsonString("last_per_subject"): - *p = DeliverLastPerSubjectPolicy - } - - return nil -} - -func (p DeliverPolicy) MarshalJSON() ([]byte, error) { - switch p { - case DeliverAllPolicy: - return json.Marshal("all") - case DeliverLastPolicy: - return json.Marshal("last") - case DeliverNewPolicy: - return json.Marshal("new") - case DeliverByStartSequencePolicy: - return json.Marshal("by_start_sequence") - case DeliverByStartTimePolicy: - return json.Marshal("by_start_time") - case DeliverLastPerSubjectPolicy: - return json.Marshal("last_per_subject") - default: - return nil, fmt.Errorf("nats: unknown deliver policy %v", p) - } -} - -// RetentionPolicy determines how messages in a set are retained. -type RetentionPolicy int - -const ( - // LimitsPolicy (default) means that messages are retained until any given limit is reached. - // This could be one of MaxMsgs, MaxBytes, or MaxAge. - LimitsPolicy RetentionPolicy = iota - // InterestPolicy specifies that when all known observables have acknowledged a message it can be removed. - InterestPolicy - // WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed. - WorkQueuePolicy -) - -// DiscardPolicy determines how to proceed when limits of messages or bytes are -// reached. -type DiscardPolicy int - -const ( - // DiscardOld will remove older messages to return to the limits. This is - // the default. 
- DiscardOld DiscardPolicy = iota - //DiscardNew will fail to store new messages. - DiscardNew -) - -const ( - limitsPolicyString = "limits" - interestPolicyString = "interest" - workQueuePolicyString = "workqueue" -) - -func (rp RetentionPolicy) String() string { - switch rp { - case LimitsPolicy: - return "Limits" - case InterestPolicy: - return "Interest" - case WorkQueuePolicy: - return "WorkQueue" - default: - return "Unknown Retention Policy" - } -} - -func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { - switch rp { - case LimitsPolicy: - return json.Marshal(limitsPolicyString) - case InterestPolicy: - return json.Marshal(interestPolicyString) - case WorkQueuePolicy: - return json.Marshal(workQueuePolicyString) - default: - return nil, fmt.Errorf("nats: can not marshal %v", rp) - } -} - -func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString(limitsPolicyString): - *rp = LimitsPolicy - case jsonString(interestPolicyString): - *rp = InterestPolicy - case jsonString(workQueuePolicyString): - *rp = WorkQueuePolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -func (dp DiscardPolicy) String() string { - switch dp { - case DiscardOld: - return "DiscardOld" - case DiscardNew: - return "DiscardNew" - default: - return "Unknown Discard Policy" - } -} - -func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { - switch dp { - case DiscardOld: - return json.Marshal("old") - case DiscardNew: - return json.Marshal("new") - default: - return nil, fmt.Errorf("nats: can not marshal %v", dp) - } -} - -func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { - switch strings.ToLower(string(data)) { - case jsonString("old"): - *dp = DiscardOld - case jsonString("new"): - *dp = DiscardNew - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -// StorageType determines how messages are stored for retention. 
-type StorageType int - -const ( - // FileStorage specifies on disk storage. It's the default. - FileStorage StorageType = iota - // MemoryStorage specifies in memory only. - MemoryStorage -) - -const ( - memoryStorageString = "memory" - fileStorageString = "file" -) - -func (st StorageType) String() string { - switch st { - case MemoryStorage: - return "Memory" - case FileStorage: - return "File" - default: - return "Unknown Storage Type" - } -} - -func (st StorageType) MarshalJSON() ([]byte, error) { - switch st { - case MemoryStorage: - return json.Marshal(memoryStorageString) - case FileStorage: - return json.Marshal(fileStorageString) - default: - return nil, fmt.Errorf("nats: can not marshal %v", st) - } -} - -func (st *StorageType) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString(memoryStorageString): - *st = MemoryStorage - case jsonString(fileStorageString): - *st = FileStorage - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -type StoreCompression uint8 - -const ( - NoCompression StoreCompression = iota - S2Compression -) - -func (alg StoreCompression) String() string { - switch alg { - case NoCompression: - return "None" - case S2Compression: - return "S2" - default: - return "Unknown StoreCompression" - } -} - -func (alg StoreCompression) MarshalJSON() ([]byte, error) { - var str string - switch alg { - case S2Compression: - str = "s2" - case NoCompression: - str = "none" - default: - return nil, fmt.Errorf("unknown compression algorithm") - } - return json.Marshal(str) -} - -func (alg *StoreCompression) UnmarshalJSON(b []byte) error { - var str string - if err := json.Unmarshal(b, &str); err != nil { - return err - } - switch str { - case "s2": - *alg = S2Compression - case "none": - *alg = NoCompression - default: - return fmt.Errorf("unknown compression algorithm") - } - return nil -} - -// Length of our hash used for named consumers. 
-const nameHashLen = 8 - -// Computes a hash for the given `name`. -func getHash(name string) string { - sha := sha256.New() - sha.Write([]byte(name)) - b := sha.Sum(nil) - for i := 0; i < nameHashLen; i++ { - b[i] = rdigits[int(b[i]%base)] - } - return string(b[:nameHashLen]) -} diff --git a/vendor/github.com/nats-io/nats.go/jserrors.go b/vendor/github.com/nats-io/nats.go/jserrors.go deleted file mode 100644 index c8b1f5fc..00000000 --- a/vendor/github.com/nats-io/nats.go/jserrors.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2020-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "errors" - "fmt" -) - -var ( - // API errors - - // ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account. - ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} - - // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account. - ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}} - - // ErrStreamNotFound is an error returned when stream with given name does not exist. 
- ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}} - - // ErrStreamNameAlreadyInUse is returned when a stream with given name already exists and has a different configuration. - ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}} - - // ErrStreamSubjectTransformNotSupported is returned when the connected nats-server version does not support setting - // the stream subject transform. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} - - // ErrStreamSourceSubjectTransformNotSupported is returned when the connected nats-server version does not support setting - // the stream source subject transform. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} - - // ErrStreamSourceNotSupported is returned when the connected nats-server version does not support setting - // the stream sources. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"} - - // ErrStreamSourceMultipleSubjectTransformsNotSupported is returned when the connected nats-server version does not support setting - // the stream sources. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. 
- ErrStreamSourceMultipleSubjectTransformsNotSupported JetStreamError = &jsError{message: "stream sourceing with multiple subject transforms not supported by nats-server"} - - // ErrConsumerNotFound is an error returned when consumer with given name does not exist. - ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}} - - // ErrMsgNotFound is returned when message with provided sequence number does npt exist. - ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}} - - // ErrBadRequest is returned when invalid request is sent to JetStream API. - ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}} - - // ErrDuplicateFilterSubjects is returned when both FilterSubject and FilterSubjects are specified when creating consumer. - ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}} - - // ErrDuplicateFilterSubjects is returned when filter subjects overlap when creating consumer. - ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}} - - // ErrEmptyFilter is returned when a filter in FilterSubjects is empty. - ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}} - - // Client errors - - // ErrConsumerNameAlreadyInUse is an error returned when consumer with given name already exists. 
- ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"} - - // ErrConsumerNotActive is an error returned when consumer is not active. - ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"} - - // ErrInvalidJSAck is returned when JetStream ack from message publish is invalid. - ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"} - - // ErrStreamConfigRequired is returned when empty stream configuration is supplied to add/update stream. - ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"} - - // ErrStreamNameRequired is returned when the provided stream name is empty. - ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"} - - // ErrConsumerNameRequired is returned when the provided consumer durable name is empty. - ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"} - - // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the connected nats-server version does not support setting - // multiple filter subjects with filter_subjects field. If this error is returned when executing AddConsumer(), the consumer with invalid - // configuration was already created in the server. - ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"} - - // ErrConsumerConfigRequired is returned when empty consumer consuguration is supplied to add/update consumer. - ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"} - - // ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on push consumer. 
- ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"} - - // ErrPullSubscribeRequired is returned when attempting to use subscribe methods not suitable for pull consumers for pull consumers. - ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"} - - // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more than once. - ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"} - - // ErrNoStreamResponse is returned when there is no response from stream (e.g. no responders error). - ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"} - - // ErrNotJSMessage is returned when attempting to get metadata from non JetStream message . - ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"} - - // ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' '). - ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"} - - // ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' '). - ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"} - - // ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful. - ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"} - - // ErrSubjectMismatch is returned when the provided subject does not match consumer's filter subject. - ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"} - - // ErrContextAndTimeout is returned when attempting to use both context and timeout. - ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"} - - // ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for consumer with AckNone policy set. 
- ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"} - - // ErrConsumerDeleted is returned when attempting to send pull request to a consumer which does not exist - ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"} - - // ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed - ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"} - - // ErrNoHeartbeat is returned when no heartbeat is received from server when sending requests with pull consumer. - ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"} - - // DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases. - // Use ErrInvalidConsumerName instead. - ErrInvalidDurableName = errors.New("nats: invalid durable name") -) - -// Error code represents JetStream error codes returned by the API -type ErrorCode uint16 - -const ( - JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039 - JSErrCodeJetStreamNotEnabled ErrorCode = 10076 - JSErrCodeInsufficientResourcesErr ErrorCode = 10023 - - JSErrCodeStreamNotFound ErrorCode = 10059 - JSErrCodeStreamNameInUse ErrorCode = 10058 - - JSErrCodeConsumerNotFound ErrorCode = 10014 - JSErrCodeConsumerNameExists ErrorCode = 10013 - JSErrCodeConsumerAlreadyExists ErrorCode = 10105 - JSErrCodeDuplicateFilterSubjects ErrorCode = 10136 - JSErrCodeOverlappingFilterSubjects ErrorCode = 10138 - JSErrCodeConsumerEmptyFilter ErrorCode = 10139 - - JSErrCodeMessageNotFound ErrorCode = 10037 - - JSErrCodeBadRequest ErrorCode = 10003 - JSStreamInvalidConfig ErrorCode = 10052 - - JSErrCodeStreamWrongLastSequence ErrorCode = 10071 -) - -// APIError is included in all API responses if there was an error. 
-type APIError struct { - Code int `json:"code"` - ErrorCode ErrorCode `json:"err_code"` - Description string `json:"description,omitempty"` -} - -// Error prints the JetStream API error code and description -func (e *APIError) Error() string { - return fmt.Sprintf("nats: %s", e.Description) -} - -// APIError implements the JetStreamError interface. -func (e *APIError) APIError() *APIError { - return e -} - -// Is matches against an APIError. -func (e *APIError) Is(err error) bool { - if e == nil { - return false - } - // Extract internal APIError to match against. - var aerr *APIError - ok := errors.As(err, &aerr) - if !ok { - return ok - } - return e.ErrorCode == aerr.ErrorCode -} - -// JetStreamError is an error result that happens when using JetStream. -// In case of client-side error, `APIError()` returns nil -type JetStreamError interface { - APIError() *APIError - error -} - -type jsError struct { - apiErr *APIError - message string -} - -func (err *jsError) APIError() *APIError { - return err.apiErr -} - -func (err *jsError) Error() string { - if err.apiErr != nil && err.apiErr.Description != "" { - return err.apiErr.Error() - } - return fmt.Sprintf("nats: %s", err.message) -} - -func (err *jsError) Unwrap() error { - // Allow matching to embedded APIError in case there is one. - if err.apiErr == nil { - return nil - } - return err.apiErr -} diff --git a/vendor/github.com/nats-io/nats.go/jsm.go b/vendor/github.com/nats-io/nats.go/jsm.go deleted file mode 100644 index 266bf066..00000000 --- a/vendor/github.com/nats-io/nats.go/jsm.go +++ /dev/null @@ -1,1665 +0,0 @@ -// Copyright 2021-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// JetStreamManager manages JetStream Streams and Consumers. -type JetStreamManager interface { - // AddStream creates a stream. - AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) - - // UpdateStream updates a stream. - UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) - - // DeleteStream deletes a stream. - DeleteStream(name string, opts ...JSOpt) error - - // StreamInfo retrieves information from a stream. - StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) - - // PurgeStream purges a stream messages. - PurgeStream(name string, opts ...JSOpt) error - - // StreamsInfo can be used to retrieve a list of StreamInfo objects. - // DEPRECATED: Use Streams() instead. - StreamsInfo(opts ...JSOpt) <-chan *StreamInfo - - // Streams can be used to retrieve a list of StreamInfo objects. - Streams(opts ...JSOpt) <-chan *StreamInfo - - // StreamNames is used to retrieve a list of Stream names. - StreamNames(opts ...JSOpt) <-chan string - - // GetMsg retrieves a raw stream message stored in JetStream by sequence number. - // Use options nats.DirectGet() or nats.DirectGetNext() to trigger retrieval - // directly from a distributed group of servers (leader and replicas). - // The stream must have been created/updated with the AllowDirect boolean. - GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) - - // GetLastMsg retrieves the last raw stream message stored in JetStream by subject. 
- // Use option nats.DirectGet() to trigger retrieval - // directly from a distributed group of servers (leader and replicas). - // The stream must have been created/updated with the AllowDirect boolean. - GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) - - // DeleteMsg deletes a message from a stream. The message is marked as erased, but its value is not overwritten. - DeleteMsg(name string, seq uint64, opts ...JSOpt) error - - // SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data - // As a result, this operation is slower than DeleteMsg() - SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error - - // AddConsumer adds a consumer to a stream. - AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) - - // UpdateConsumer updates an existing consumer. - UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) - - // DeleteConsumer deletes a consumer. - DeleteConsumer(stream, consumer string, opts ...JSOpt) error - - // ConsumerInfo retrieves information of a consumer from a stream. - ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error) - - // ConsumersInfo is used to retrieve a list of ConsumerInfo objects. - // DEPRECATED: Use Consumers() instead. - ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo - - // Consumers is used to retrieve a list of ConsumerInfo objects. - Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo - - // ConsumerNames is used to retrieve a list of Consumer names. - ConsumerNames(stream string, opts ...JSOpt) <-chan string - - // AccountInfo retrieves info about the JetStream usage from an account. - AccountInfo(opts ...JSOpt) (*AccountInfo, error) - - // StreamNameBySubject returns a stream matching given subject. - StreamNameBySubject(string, ...JSOpt) (string, error) -} - -// StreamConfig will determine the properties for a stream. 
-// There are sensible defaults for most. If no subjects are -// given the name will be used as the only subject. -type StreamConfig struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - Subjects []string `json:"subjects,omitempty"` - Retention RetentionPolicy `json:"retention"` - MaxConsumers int `json:"max_consumers"` - MaxMsgs int64 `json:"max_msgs"` - MaxBytes int64 `json:"max_bytes"` - Discard DiscardPolicy `json:"discard"` - DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"` - MaxAge time.Duration `json:"max_age"` - MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"` - MaxMsgSize int32 `json:"max_msg_size,omitempty"` - Storage StorageType `json:"storage"` - Replicas int `json:"num_replicas"` - NoAck bool `json:"no_ack,omitempty"` - Template string `json:"template_owner,omitempty"` - Duplicates time.Duration `json:"duplicate_window,omitempty"` - Placement *Placement `json:"placement,omitempty"` - Mirror *StreamSource `json:"mirror,omitempty"` - Sources []*StreamSource `json:"sources,omitempty"` - Sealed bool `json:"sealed,omitempty"` - DenyDelete bool `json:"deny_delete,omitempty"` - DenyPurge bool `json:"deny_purge,omitempty"` - AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` - Compression StoreCompression `json:"compression"` - FirstSeq uint64 `json:"first_seq,omitempty"` - - // Allow applying a subject transform to incoming messages before doing anything else. - SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"` - - // Allow republish of the message after being sequenced and stored. - RePublish *RePublish `json:"republish,omitempty"` - - // Allow higher performance, direct access to get individual messages. E.g. KeyValue - AllowDirect bool `json:"allow_direct"` - // Allow higher performance and unified direct access for mirrors as well. - MirrorDirect bool `json:"mirror_direct"` - - // Limits for consumers on this stream. 
- ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"` - - // Metadata is additional metadata for the Stream. - // Keys starting with `_nats` are reserved. - // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` -} - -// SubjectTransformConfig is for applying a subject transform (to matching messages) before doing anything else when a new message is received. -type SubjectTransformConfig struct { - Source string `json:"src,omitempty"` - Destination string `json:"dest"` -} - -// RePublish is for republishing messages once committed to a stream. The original -// subject cis remapped from the subject pattern to the destination pattern. -type RePublish struct { - Source string `json:"src,omitempty"` - Destination string `json:"dest"` - HeadersOnly bool `json:"headers_only,omitempty"` -} - -// Placement is used to guide placement of streams in clustered JetStream. -type Placement struct { - Cluster string `json:"cluster"` - Tags []string `json:"tags,omitempty"` -} - -// StreamSource dictates how streams can source from other streams. -type StreamSource struct { - Name string `json:"name"` - OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - FilterSubject string `json:"filter_subject,omitempty"` - SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` - External *ExternalStream `json:"external,omitempty"` - Domain string `json:"-"` -} - -// ExternalStream allows you to qualify access to a stream source in another -// account. -type ExternalStream struct { - APIPrefix string `json:"api"` - DeliverPrefix string `json:"deliver,omitempty"` -} - -// StreamConsumerLimits are the limits for a consumer on a stream. -// These can be overridden on a per consumer basis. 
-type StreamConsumerLimits struct { - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - MaxAckPending int `json:"max_ack_pending,omitempty"` -} - -// Helper for copying when we do not want to change user's version. -func (ss *StreamSource) copy() *StreamSource { - nss := *ss - // Check pointers - if ss.OptStartTime != nil { - t := *ss.OptStartTime - nss.OptStartTime = &t - } - if ss.External != nil { - ext := *ss.External - nss.External = &ext - } - return &nss -} - -// If we have a Domain, convert to the appropriate ext.APIPrefix. -// This will change the stream source, so should be a copy passed in. -func (ss *StreamSource) convertDomain() error { - if ss.Domain == _EMPTY_ { - return nil - } - if ss.External != nil { - // These should be mutually exclusive. - // TODO(dlc) - Make generic? - return errors.New("nats: domain and external are both set") - } - ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)} - return nil -} - -// apiResponse is a standard response from the JetStream JSON API -type apiResponse struct { - Type string `json:"type"` - Error *APIError `json:"error,omitempty"` -} - -// apiPaged includes variables used to create paged responses from the JSON API -type apiPaged struct { - Total int `json:"total"` - Offset int `json:"offset"` - Limit int `json:"limit"` -} - -// apiPagedRequest includes parameters allowing specific pages to be requested -// from APIs responding with apiPaged. -type apiPagedRequest struct { - Offset int `json:"offset,omitempty"` -} - -// AccountInfo contains info about the JetStream usage from the current account. 
-type AccountInfo struct { - Tier - Domain string `json:"domain"` - API APIStats `json:"api"` - Tiers map[string]Tier `json:"tiers"` -} - -type Tier struct { - Memory uint64 `json:"memory"` - Store uint64 `json:"storage"` - Streams int `json:"streams"` - Consumers int `json:"consumers"` - Limits AccountLimits `json:"limits"` -} - -// APIStats reports on API calls to JetStream for this account. -type APIStats struct { - Total uint64 `json:"total"` - Errors uint64 `json:"errors"` -} - -// AccountLimits includes the JetStream limits of the current account. -type AccountLimits struct { - MaxMemory int64 `json:"max_memory"` - MaxStore int64 `json:"max_storage"` - MaxStreams int `json:"max_streams"` - MaxConsumers int `json:"max_consumers"` - MaxAckPending int `json:"max_ack_pending"` - MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"` - StoreMaxStreamBytes int64 `json:"storage_max_stream_bytes"` - MaxBytesRequired bool `json:"max_bytes_required"` -} - -type accountInfoResponse struct { - apiResponse - AccountInfo -} - -// AccountInfo retrieves info about the JetStream usage from the current account. -// If JetStream is not enabled, this will return ErrJetStreamNotEnabled -// Other errors can happen but are generally considered retryable -func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) { - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil) - if err != nil { - // todo maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had - if err == ErrNoResponders { - err = ErrJetStreamNotEnabled - } - return nil, err - } - var info accountInfoResponse - if err := json.Unmarshal(resp.Data, &info); err != nil { - return nil, err - } - if info.Error != nil { - // Internally checks based on error code instead of description match. 
- if errors.Is(info.Error, ErrJetStreamNotEnabledForAccount) { - return nil, ErrJetStreamNotEnabledForAccount - } - return nil, info.Error - } - - return &info.AccountInfo, nil -} - -type createConsumerRequest struct { - Stream string `json:"stream_name"` - Config *ConsumerConfig `json:"config"` -} - -type consumerResponse struct { - apiResponse - *ConsumerInfo -} - -// AddConsumer will add a JetStream consumer. -func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { - if cfg == nil { - cfg = &ConsumerConfig{} - } - consumerName := cfg.Name - if consumerName == _EMPTY_ { - consumerName = cfg.Durable - } - if consumerName != _EMPTY_ { - consInfo, err := js.ConsumerInfo(stream, consumerName, opts...) - if err != nil && !errors.Is(err, ErrConsumerNotFound) && !errors.Is(err, ErrStreamNotFound) { - return nil, err - } - - if consInfo != nil { - sameConfig := checkConfig(&consInfo.Config, cfg) - if sameConfig != nil { - return nil, fmt.Errorf("%w: creating consumer %q on stream %q", ErrConsumerNameAlreadyInUse, consumerName, stream) - } else { - return consInfo, nil - } - } - } - - return js.upsertConsumer(stream, consumerName, cfg, opts...) -} - -func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { - if cfg == nil { - return nil, ErrConsumerConfigRequired - } - consumerName := cfg.Name - if consumerName == _EMPTY_ { - consumerName = cfg.Durable - } - if consumerName == _EMPTY_ { - return nil, ErrConsumerNameRequired - } - return js.upsertConsumer(stream, consumerName, cfg, opts...) -} - -func (js *js) upsertConsumer(stream, consumerName string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { - if err := checkStreamName(stream); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg}) - if err != nil { - return nil, err - } - - var ccSubj string - if consumerName == _EMPTY_ { - // if consumer name is empty (neither Durable nor Name is set), use the legacy ephemeral endpoint - ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) - } else if err := checkConsumerName(consumerName); err != nil { - return nil, err - } else if js.nc.serverMinVersion(2, 9, 0) { - if cfg.Durable != "" && js.opts.featureFlags.useDurableConsumerCreate { - // if user set the useDurableConsumerCreate flag, use the legacy DURABLE.CREATE endpoint - ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) - } else if cfg.FilterSubject == _EMPTY_ || cfg.FilterSubject == ">" { - // if filter subject is empty or ">", use the endpoint without filter subject - ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName) - } else { - // if filter subject is not empty, use the endpoint with filter subject - ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject) - } - } else { - if cfg.Durable != "" { - // if Durable is set, use the DURABLE.CREATE endpoint - ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) - } else { - // if Durable is not set, use the legacy ephemeral endpoint - ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) - } - } - - resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(ccSubj), req) - if err != nil { - if err == ErrNoResponders { - err = ErrJetStreamNotEnabled - } - return nil, err - } - var info consumerResponse - err = json.Unmarshal(resp.Data, &info) - if err != nil { - return nil, err - } - if info.Error != nil { - if errors.Is(info.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - if errors.Is(info.Error, ErrConsumerNotFound) { - return nil, ErrConsumerNotFound - } - return nil, info.Error - } - - // check 
whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo - if len(cfg.FilterSubjects) != 0 && len(info.Config.FilterSubjects) == 0 { - return nil, ErrConsumerMultipleFilterSubjectsNotSupported - } - return info.ConsumerInfo, nil -} - -// consumerDeleteResponse is the response for a Consumer delete request. -type consumerDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` -} - -func checkStreamName(stream string) error { - if stream == _EMPTY_ { - return ErrStreamNameRequired - } - if strings.ContainsAny(stream, ". ") { - return ErrInvalidStreamName - } - return nil -} - -// Check that the consumer name is not empty and is valid (does not contain "." and " "). -// Additional consumer name validation is done in nats-server. -// Returns ErrConsumerNameRequired if consumer name is empty, ErrInvalidConsumerName is invalid, otherwise nil -func checkConsumerName(consumer string) error { - if consumer == _EMPTY_ { - return ErrConsumerNameRequired - } - if strings.ContainsAny(consumer, ". ") { - return ErrInvalidConsumerName - } - return nil -} - -// DeleteConsumer deletes a Consumer. -func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error { - if err := checkStreamName(stream); err != nil { - return err - } - if err := checkConsumerName(consumer); err != nil { - return err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer)) - r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil) - if err != nil { - return err - } - var resp consumerDeleteResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - - if resp.Error != nil { - if errors.Is(resp.Error, ErrConsumerNotFound) { - return ErrConsumerNotFound - } - return resp.Error - } - return nil -} - -// ConsumerInfo returns information about a Consumer. 
-func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) { - if err := checkStreamName(stream); err != nil { - return nil, err - } - if err := checkConsumerName(consumer); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - return js.getConsumerInfoContext(o.ctx, stream, consumer) -} - -// consumerLister fetches pages of ConsumerInfo objects. This object is not -// safe to use for multiple threads. -type consumerLister struct { - stream string - js *js - - err error - offset int - page []*ConsumerInfo - pageInfo *apiPaged -} - -// consumersRequest is the type used for Consumers requests. -type consumersRequest struct { - apiPagedRequest -} - -// consumerListResponse is the response for a Consumers List request. -type consumerListResponse struct { - apiResponse - apiPaged - Consumers []*ConsumerInfo `json:"consumers"` -} - -// Next fetches the next ConsumerInfo page. 
-func (c *consumerLister) Next() bool { - if c.err != nil { - return false - } - if err := checkStreamName(c.stream); err != nil { - c.err = err - return false - } - if c.pageInfo != nil && c.offset >= c.pageInfo.Total { - return false - } - - req, err := json.Marshal(consumersRequest{ - apiPagedRequest: apiPagedRequest{Offset: c.offset}, - }) - if err != nil { - c.err = err - return false - } - - var cancel context.CancelFunc - ctx := c.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) - defer cancel() - } - - clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerListT, c.stream)) - r, err := c.js.apiRequestWithContext(ctx, clSubj, req) - if err != nil { - c.err = err - return false - } - var resp consumerListResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - c.err = err - return false - } - if resp.Error != nil { - c.err = resp.Error - return false - } - - c.pageInfo = &resp.apiPaged - c.page = resp.Consumers - c.offset += len(c.page) - return true -} - -// Page returns the current ConsumerInfo page. -func (c *consumerLister) Page() []*ConsumerInfo { - return c.page -} - -// Err returns any errors found while fetching pages. -func (c *consumerLister) Err() error { - return c.err -} - -// Consumers is used to retrieve a list of ConsumerInfo objects. -func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return nil - } - - ch := make(chan *ConsumerInfo) - l := &consumerLister{js: &js{nc: jsc.nc, opts: o}, stream: stream} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// ConsumersInfo is used to retrieve a list of ConsumerInfo objects. -// DEPRECATED: Use Consumers() instead. 
-func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo { - return jsc.Consumers(stream, opts...) -} - -type consumerNamesLister struct { - stream string - js *js - - err error - offset int - page []string - pageInfo *apiPaged -} - -// consumerNamesListResponse is the response for a Consumers Names List request. -type consumerNamesListResponse struct { - apiResponse - apiPaged - Consumers []string `json:"consumers"` -} - -// Next fetches the next consumer names page. -func (c *consumerNamesLister) Next() bool { - if c.err != nil { - return false - } - if err := checkStreamName(c.stream); err != nil { - c.err = err - return false - } - if c.pageInfo != nil && c.offset >= c.pageInfo.Total { - return false - } - - var cancel context.CancelFunc - ctx := c.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) - defer cancel() - } - - req, err := json.Marshal(consumersRequest{ - apiPagedRequest: apiPagedRequest{Offset: c.offset}, - }) - if err != nil { - c.err = err - return false - } - clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerNamesT, c.stream)) - r, err := c.js.apiRequestWithContext(ctx, clSubj, req) - if err != nil { - c.err = err - return false - } - var resp consumerNamesListResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - c.err = err - return false - } - if resp.Error != nil { - c.err = resp.Error - return false - } - - c.pageInfo = &resp.apiPaged - c.page = resp.Consumers - c.offset += len(c.page) - return true -} - -// Page returns the current ConsumerInfo page. -func (c *consumerNamesLister) Page() []string { - return c.page -} - -// Err returns any errors found while fetching pages. -func (c *consumerNamesLister) Err() error { - return c.err -} - -// ConsumerNames is used to retrieve a list of Consumer names. -func (jsc *js) ConsumerNames(stream string, opts ...JSOpt) <-chan string { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) 
- if err != nil { - return nil - } - - ch := make(chan string) - l := &consumerNamesLister{stream: stream, js: &js{nc: jsc.nc, opts: o}} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// streamCreateResponse stream creation. -type streamCreateResponse struct { - apiResponse - *StreamInfo -} - -func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { - if cfg == nil { - return nil, ErrStreamConfigRequired - } - if err := checkStreamName(cfg.Name); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - // In case we need to change anything, copy so we do not change the caller's version. - ncfg := *cfg - - // If we have a mirror and an external domain, convert to ext.APIPrefix. - if cfg.Mirror != nil && cfg.Mirror.Domain != _EMPTY_ { - // Copy so we do not change the caller's version. - ncfg.Mirror = ncfg.Mirror.copy() - if err := ncfg.Mirror.convertDomain(); err != nil { - return nil, err - } - } - // Check sources for the same. - if len(ncfg.Sources) > 0 { - ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) 
- for i, ss := range ncfg.Sources { - if ss.Domain != _EMPTY_ { - ncfg.Sources[i] = ss.copy() - if err := ncfg.Sources[i].convertDomain(); err != nil { - return nil, err - } - } - } - } - - req, err := json.Marshal(&ncfg) - if err != nil { - return nil, err - } - - csSubj := js.apiSubj(fmt.Sprintf(apiStreamCreateT, cfg.Name)) - r, err := js.apiRequestWithContext(o.ctx, csSubj, req) - if err != nil { - return nil, err - } - var resp streamCreateResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNameAlreadyInUse) { - return nil, ErrStreamNameAlreadyInUse - } - return nil, resp.Error - } - - // check that input subject transform (if used) is reflected in the returned ConsumerInfo - if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { - return nil, ErrStreamSubjectTransformNotSupported - } - if len(cfg.Sources) != 0 { - if len(cfg.Sources) != len(resp.Config.Sources) { - return nil, ErrStreamSourceNotSupported - } - for i := range cfg.Sources { - if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { - return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported - } - } - } - - return resp.StreamInfo, nil -} - -type ( - // StreamInfoRequest contains additional option to return - StreamInfoRequest struct { - apiPagedRequest - // DeletedDetails when true includes information about deleted messages - DeletedDetails bool `json:"deleted_details,omitempty"` - // SubjectsFilter when set, returns information on the matched subjects - SubjectsFilter string `json:"subjects_filter,omitempty"` - } - streamInfoResponse = struct { - apiResponse - apiPaged - *StreamInfo - } -) - -func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { - if err := checkStreamName(stream); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - var i int - var subjectMessagesMap map[string]uint64 - var req []byte - var requestPayload bool - - var siOpts StreamInfoRequest - if o.streamInfoOpts != nil { - requestPayload = true - siOpts = *o.streamInfoOpts - } - - for { - if requestPayload { - siOpts.Offset = i - if req, err = json.Marshal(&siOpts); err != nil { - return nil, err - } - } - - siSubj := js.apiSubj(fmt.Sprintf(apiStreamInfoT, stream)) - - r, err := js.apiRequestWithContext(o.ctx, siSubj, req) - if err != nil { - return nil, err - } - - var resp streamInfoResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - var total int - // for backwards compatibility - if resp.Total != 0 { - total = resp.Total - } else { - total = len(resp.State.Subjects) - } - - if requestPayload && len(resp.StreamInfo.State.Subjects) > 0 { - if subjectMessagesMap == nil { - subjectMessagesMap = make(map[string]uint64, total) - } - - for k, j := range resp.State.Subjects { - subjectMessagesMap[k] = j - i++ - } - } - - if i >= total { - if requestPayload { - resp.StreamInfo.State.Subjects = subjectMessagesMap - } - return resp.StreamInfo, nil - } - } -} - -// StreamInfo shows config and current state for this stream. -type StreamInfo struct { - Config StreamConfig `json:"config"` - Created time.Time `json:"created"` - State StreamState `json:"state"` - Cluster *ClusterInfo `json:"cluster,omitempty"` - Mirror *StreamSourceInfo `json:"mirror,omitempty"` - Sources []*StreamSourceInfo `json:"sources,omitempty"` - Alternates []*StreamAlternate `json:"alternates,omitempty"` -} - -// StreamAlternate is an alternate stream represented by a mirror. 
-type StreamAlternate struct { - Name string `json:"name"` - Domain string `json:"domain,omitempty"` - Cluster string `json:"cluster"` -} - -// StreamSourceInfo shows information about an upstream stream source. -type StreamSourceInfo struct { - Name string `json:"name"` - Lag uint64 `json:"lag"` - Active time.Duration `json:"active"` - External *ExternalStream `json:"external"` - Error *APIError `json:"error"` - FilterSubject string `json:"filter_subject,omitempty"` - SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` -} - -// StreamState is information about the given stream. -type StreamState struct { - Msgs uint64 `json:"messages"` - Bytes uint64 `json:"bytes"` - FirstSeq uint64 `json:"first_seq"` - FirstTime time.Time `json:"first_ts"` - LastSeq uint64 `json:"last_seq"` - LastTime time.Time `json:"last_ts"` - Consumers int `json:"consumer_count"` - Deleted []uint64 `json:"deleted"` - NumDeleted int `json:"num_deleted"` - NumSubjects uint64 `json:"num_subjects"` - Subjects map[string]uint64 `json:"subjects"` -} - -// ClusterInfo shows information about the underlying set of servers -// that make up the stream or consumer. -type ClusterInfo struct { - Name string `json:"name,omitempty"` - Leader string `json:"leader,omitempty"` - Replicas []*PeerInfo `json:"replicas,omitempty"` -} - -// PeerInfo shows information about all the peers in the cluster that -// are supporting the stream or consumer. -type PeerInfo struct { - Name string `json:"name"` - Current bool `json:"current"` - Offline bool `json:"offline,omitempty"` - Active time.Duration `json:"active"` - Lag uint64 `json:"lag,omitempty"` -} - -// UpdateStream updates a Stream. -func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { - if cfg == nil { - return nil, ErrStreamConfigRequired - } - if err := checkStreamName(cfg.Name); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - req, err := json.Marshal(cfg) - if err != nil { - return nil, err - } - - usSubj := js.apiSubj(fmt.Sprintf(apiStreamUpdateT, cfg.Name)) - r, err := js.apiRequestWithContext(o.ctx, usSubj, req) - if err != nil { - return nil, err - } - var resp streamInfoResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - // check that input subject transform (if used) is reflected in the returned StreamInfo - if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { - return nil, ErrStreamSubjectTransformNotSupported - } - - if len(cfg.Sources) != 0 { - if len(cfg.Sources) != len(resp.Config.Sources) { - return nil, ErrStreamSourceNotSupported - } - for i := range cfg.Sources { - if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { - return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported - } - } - } - - return resp.StreamInfo, nil -} - -// streamDeleteResponse is the response for a Stream delete request. -type streamDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` -} - -// DeleteStream deletes a Stream. -func (js *js) DeleteStream(name string, opts ...JSOpt) error { - if err := checkStreamName(name); err != nil { - return err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name)) - r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) - if err != nil { - return err - } - var resp streamDeleteResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNotFound) { - return ErrStreamNotFound - } - return resp.Error - } - return nil -} - -type apiMsgGetRequest struct { - Seq uint64 `json:"seq,omitempty"` - LastFor string `json:"last_by_subj,omitempty"` - NextFor string `json:"next_by_subj,omitempty"` -} - -// RawStreamMsg is a raw message stored in JetStream. -type RawStreamMsg struct { - Subject string - Sequence uint64 - Header Header - Data []byte - Time time.Time -} - -// storedMsg is a raw message stored in JetStream. -type storedMsg struct { - Subject string `json:"subject"` - Sequence uint64 `json:"seq"` - Header []byte `json:"hdrs,omitempty"` - Data []byte `json:"data,omitempty"` - Time time.Time `json:"time"` -} - -// apiMsgGetResponse is the response for a Stream get request. -type apiMsgGetResponse struct { - apiResponse - Message *storedMsg `json:"message,omitempty"` -} - -// GetLastMsg retrieves the last raw stream message stored in JetStream by subject. -func (js *js) GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) { - return js.getMsg(name, &apiMsgGetRequest{LastFor: subject}, opts...) -} - -// GetMsg retrieves a raw stream message stored in JetStream by sequence number. -func (js *js) GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) { - return js.getMsg(name, &apiMsgGetRequest{Seq: seq}, opts...) -} - -// Low level getMsg -func (js *js) getMsg(name string, mreq *apiMsgGetRequest, opts ...JSOpt) (*RawStreamMsg, error) { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - if err := checkStreamName(name); err != nil { - return nil, err - } - - var apiSubj string - if o.directGet && mreq.LastFor != _EMPTY_ { - apiSubj = apiDirectMsgGetLastBySubjectT - dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name, mreq.LastFor)) - r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) - if err != nil { - return nil, err - } - return convertDirectGetMsgResponseToMsg(name, r) - } - - if o.directGet { - apiSubj = apiDirectMsgGetT - mreq.NextFor = o.directNextFor - } else { - apiSubj = apiMsgGetT - } - - req, err := json.Marshal(mreq) - if err != nil { - return nil, err - } - - dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name)) - r, err := js.apiRequestWithContext(o.ctx, dsSubj, req) - if err != nil { - return nil, err - } - - if o.directGet { - return convertDirectGetMsgResponseToMsg(name, r) - } - - var resp apiMsgGetResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrMsgNotFound) { - return nil, ErrMsgNotFound - } - if errors.Is(resp.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - msg := resp.Message - - var hdr Header - if len(msg.Header) > 0 { - hdr, err = DecodeHeadersMsg(msg.Header) - if err != nil { - return nil, err - } - } - - return &RawStreamMsg{ - Subject: msg.Subject, - Sequence: msg.Sequence, - Header: hdr, - Data: msg.Data, - Time: msg.Time, - }, nil -} - -func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error) { - // Check for 404/408. 
We would get a no-payload message and a "Status" header - if len(r.Data) == 0 { - val := r.Header.Get(statusHdr) - if val != _EMPTY_ { - switch val { - case noMessagesSts: - return nil, ErrMsgNotFound - default: - desc := r.Header.Get(descrHdr) - if desc == _EMPTY_ { - desc = "unable to get message" - } - return nil, fmt.Errorf("nats: %s", desc) - } - } - } - // Check for headers that give us the required information to - // reconstruct the message. - if len(r.Header) == 0 { - return nil, fmt.Errorf("nats: response should have headers") - } - stream := r.Header.Get(JSStream) - if stream == _EMPTY_ { - return nil, fmt.Errorf("nats: missing stream header") - } - - // Mirrors can now answer direct gets, so removing check for name equality. - // TODO(dlc) - We could have server also have a header with origin and check that? - - seqStr := r.Header.Get(JSSequence) - if seqStr == _EMPTY_ { - return nil, fmt.Errorf("nats: missing sequence header") - } - seq, err := strconv.ParseUint(seqStr, 10, 64) - if err != nil { - return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) - } - timeStr := r.Header.Get(JSTimeStamp) - if timeStr == _EMPTY_ { - return nil, fmt.Errorf("nats: missing timestamp header") - } - // Temporary code: the server in main branch is sending with format - // "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed - // to use format RFC3339Nano. Because of server test deps/cycle, - // support both until the server PR lands. 
- tm, err := time.Parse(time.RFC3339Nano, timeStr) - if err != nil { - tm, err = time.Parse("2006-01-02 15:04:05.999999999 +0000 UTC", timeStr) - if err != nil { - return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) - } - } - subj := r.Header.Get(JSSubject) - if subj == _EMPTY_ { - return nil, fmt.Errorf("nats: missing subject header") - } - return &RawStreamMsg{ - Subject: subj, - Sequence: seq, - Header: r.Header, - Data: r.Data, - Time: tm, - }, nil -} - -type msgDeleteRequest struct { - Seq uint64 `json:"seq"` - NoErase bool `json:"no_erase,omitempty"` -} - -// msgDeleteResponse is the response for a Stream delete request. -type msgDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` -} - -// DeleteMsg deletes a message from a stream. -// The message is marked as erased, but not overwritten -func (js *js) DeleteMsg(name string, seq uint64, opts ...JSOpt) error { - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq, NoErase: true}) -} - -// SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data -// As a result, this operation is slower than DeleteMsg() -func (js *js) SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq}) -} - -func (js *js) deleteMsg(ctx context.Context, stream string, req *msgDeleteRequest) error { - if err := checkStreamName(stream); err != nil { - return err - } - reqJSON, err := json.Marshal(req) - if err != nil { - return err - } - - dsSubj := js.apiSubj(fmt.Sprintf(apiMsgDeleteT, stream)) - r, err := js.apiRequestWithContext(ctx, dsSubj, reqJSON) - if err != nil { - return err - } - var resp msgDeleteResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - if resp.Error != nil { - return resp.Error - } - return nil -} - -// StreamPurgeRequest is optional request information to the purge API. -type StreamPurgeRequest struct { - // Purge up to but not including sequence. - Sequence uint64 `json:"seq,omitempty"` - // Subject to match against messages for the purge command. - Subject string `json:"filter,omitempty"` - // Number of messages to keep. - Keep uint64 `json:"keep,omitempty"` -} - -type streamPurgeResponse struct { - apiResponse - Success bool `json:"success,omitempty"` - Purged uint64 `json:"purged"` -} - -// PurgeStream purges messages on a Stream. -func (js *js) PurgeStream(stream string, opts ...JSOpt) error { - if err := checkStreamName(stream); err != nil { - return err - } - var req *StreamPurgeRequest - var ok bool - for _, opt := range opts { - // For PurgeStream, only request body opt is relevant - if req, ok = opt.(*StreamPurgeRequest); ok { - break - } - } - return js.purgeStream(stream, req) -} - -func (js *js) purgeStream(stream string, req *StreamPurgeRequest, opts ...JSOpt) error { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - var b []byte - if req != nil { - if b, err = json.Marshal(req); err != nil { - return err - } - } - - psSubj := js.apiSubj(fmt.Sprintf(apiStreamPurgeT, stream)) - r, err := js.apiRequestWithContext(o.ctx, psSubj, b) - if err != nil { - return err - } - var resp streamPurgeResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrBadRequest) { - return fmt.Errorf("%w: %s", ErrBadRequest, "invalid purge request body") - } - return resp.Error - } - return nil -} - -// streamLister fetches pages of StreamInfo objects. This object is not safe -// to use for multiple threads. -type streamLister struct { - js *js - page []*StreamInfo - err error - - offset int - pageInfo *apiPaged -} - -// streamListResponse list of detailed stream information. -// A nil request is valid and means all streams. -type streamListResponse struct { - apiResponse - apiPaged - Streams []*StreamInfo `json:"streams"` -} - -// streamNamesRequest is used for Stream Name requests. -type streamNamesRequest struct { - apiPagedRequest - // These are filters that can be applied to the list. - Subject string `json:"subject,omitempty"` -} - -// Next fetches the next StreamInfo page. 
-func (s *streamLister) Next() bool { - if s.err != nil { - return false - } - if s.pageInfo != nil && s.offset >= s.pageInfo.Total { - return false - } - - req, err := json.Marshal(streamNamesRequest{ - apiPagedRequest: apiPagedRequest{Offset: s.offset}, - Subject: s.js.opts.streamListSubject, - }) - if err != nil { - s.err = err - return false - } - - var cancel context.CancelFunc - ctx := s.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), s.js.opts.wait) - defer cancel() - } - - slSubj := s.js.apiSubj(apiStreamListT) - r, err := s.js.apiRequestWithContext(ctx, slSubj, req) - if err != nil { - s.err = err - return false - } - var resp streamListResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - s.err = err - return false - } - if resp.Error != nil { - s.err = resp.Error - return false - } - - s.pageInfo = &resp.apiPaged - s.page = resp.Streams - s.offset += len(s.page) - return true -} - -// Page returns the current StreamInfo page. -func (s *streamLister) Page() []*StreamInfo { - return s.page -} - -// Err returns any errors found while fetching pages. -func (s *streamLister) Err() error { - return s.err -} - -// Streams can be used to retrieve a list of StreamInfo objects. -func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return nil - } - - ch := make(chan *StreamInfo) - l := &streamLister{js: &js{nc: jsc.nc, opts: o}} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// StreamsInfo can be used to retrieve a list of StreamInfo objects. -// DEPRECATED: Use Streams() instead. -func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo { - return jsc.Streams(opts...) 
-} - -type streamNamesLister struct { - js *js - - err error - offset int - page []string - pageInfo *apiPaged -} - -// Next fetches the next stream names page. -func (l *streamNamesLister) Next() bool { - if l.err != nil { - return false - } - if l.pageInfo != nil && l.offset >= l.pageInfo.Total { - return false - } - - var cancel context.CancelFunc - ctx := l.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), l.js.opts.wait) - defer cancel() - } - - req, err := json.Marshal(streamNamesRequest{ - apiPagedRequest: apiPagedRequest{Offset: l.offset}, - Subject: l.js.opts.streamListSubject, - }) - if err != nil { - l.err = err - return false - } - r, err := l.js.apiRequestWithContext(ctx, l.js.apiSubj(apiStreams), req) - if err != nil { - l.err = err - return false - } - var resp streamNamesResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - l.err = err - return false - } - if resp.Error != nil { - l.err = resp.Error - return false - } - - l.pageInfo = &resp.apiPaged - l.page = resp.Streams - l.offset += len(l.page) - return true -} - -// Page returns the current ConsumerInfo page. -func (l *streamNamesLister) Page() []string { - return l.page -} - -// Err returns any errors found while fetching pages. -func (l *streamNamesLister) Err() error { - return l.err -} - -// StreamNames is used to retrieve a list of Stream names. -func (jsc *js) StreamNames(opts ...JSOpt) <-chan string { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return nil - } - - ch := make(chan string) - l := &streamNamesLister{js: &js{nc: jsc.nc, opts: o}} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// StreamNameBySubject returns a stream name that matches the subject. 
-func (jsc *js) StreamNameBySubject(subj string, opts ...JSOpt) (string, error) { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return "", err - } - if cancel != nil { - defer cancel() - } - - var slr streamNamesResponse - req := &streamRequest{subj} - j, err := json.Marshal(req) - if err != nil { - return _EMPTY_, err - } - - resp, err := jsc.apiRequestWithContext(o.ctx, jsc.apiSubj(apiStreams), j) - if err != nil { - if err == ErrNoResponders { - err = ErrJetStreamNotEnabled - } - return _EMPTY_, err - } - if err := json.Unmarshal(resp.Data, &slr); err != nil { - return _EMPTY_, err - } - - if slr.Error != nil || len(slr.Streams) != 1 { - return _EMPTY_, ErrNoMatchingStream - } - return slr.Streams[0], nil -} - -func getJSContextOpts(defs *jsOpts, opts ...JSOpt) (*jsOpts, context.CancelFunc, error) { - var o jsOpts - for _, opt := range opts { - if err := opt.configureJSContext(&o); err != nil { - return nil, nil, err - } - } - - // Check for option collisions. Right now just timeout and context. - if o.ctx != nil && o.wait != 0 { - return nil, nil, ErrContextAndTimeout - } - if o.wait == 0 && o.ctx == nil { - o.wait = defs.wait - } - var cancel context.CancelFunc - if o.ctx == nil && o.wait > 0 { - o.ctx, cancel = context.WithTimeout(context.Background(), o.wait) - } - if o.pre == _EMPTY_ { - o.pre = defs.pre - } - - return &o, cancel, nil -} diff --git a/vendor/github.com/nats-io/nats.go/kv.go b/vendor/github.com/nats-io/nats.go/kv.go deleted file mode 100644 index 7382f4d8..00000000 --- a/vendor/github.com/nats-io/nats.go/kv.go +++ /dev/null @@ -1,1119 +0,0 @@ -// Copyright 2021-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "context" - "errors" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go/internal/parser" -) - -// KeyValueManager is used to manage KeyValue stores. -type KeyValueManager interface { - // KeyValue will lookup and bind to an existing KeyValue store. - KeyValue(bucket string) (KeyValue, error) - // CreateKeyValue will create a KeyValue store with the following configuration. - CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) - // DeleteKeyValue will delete this KeyValue store (JetStream stream). - DeleteKeyValue(bucket string) error - // KeyValueStoreNames is used to retrieve a list of key value store names - KeyValueStoreNames() <-chan string - // KeyValueStores is used to retrieve a list of key value store statuses - KeyValueStores() <-chan KeyValueStatus -} - -// KeyValue contains methods to operate on a KeyValue store. -type KeyValue interface { - // Get returns the latest value for the key. - Get(key string) (entry KeyValueEntry, err error) - // GetRevision returns a specific revision value for the key. - GetRevision(key string, revision uint64) (entry KeyValueEntry, err error) - // Put will place the new value for the key into the store. - Put(key string, value []byte) (revision uint64, err error) - // PutString will place the string for the key into the store. - PutString(key string, value string) (revision uint64, err error) - // Create will add the key/value pair iff it does not exist. 
- Create(key string, value []byte) (revision uint64, err error) - // Update will update the value iff the latest revision matches. - Update(key string, value []byte, last uint64) (revision uint64, err error) - // Delete will place a delete marker and leave all revisions. - Delete(key string, opts ...DeleteOpt) error - // Purge will place a delete marker and remove all previous revisions. - Purge(key string, opts ...DeleteOpt) error - // Watch for any updates to keys that match the keys argument which could include wildcards. - // Watch will send a nil entry when it has received all initial values. - Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) - // WatchAll will invoke the callback for all updates. - WatchAll(opts ...WatchOpt) (KeyWatcher, error) - // Keys will return all keys. - Keys(opts ...WatchOpt) ([]string, error) - // History will return all historical values for the key. - History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) - // Bucket returns the current bucket name. - Bucket() string - // PurgeDeletes will remove all current delete markers. - PurgeDeletes(opts ...PurgeOpt) error - // Status retrieves the status and configuration of a bucket - Status() (KeyValueStatus, error) -} - -// KeyValueStatus is run-time status about a Key-Value bucket -type KeyValueStatus interface { - // Bucket the name of the bucket - Bucket() string - - // Values is how many messages are in the bucket, including historical values - Values() uint64 - - // History returns the configured history kept per key - History() int64 - - // TTL is how long the bucket keeps values for - TTL() time.Duration - - // BackingStore indicates what technology is used for storage of the bucket - BackingStore() string - - // Bytes returns the size in bytes of the bucket - Bytes() uint64 -} - -// KeyWatcher is what is returned when doing a watch. -type KeyWatcher interface { - // Context returns watcher context optionally provided by nats.Context option. 
- Context() context.Context - // Updates returns a channel to read any updates to entries. - Updates() <-chan KeyValueEntry - // Stop will stop this watcher. - Stop() error -} - -type WatchOpt interface { - configureWatcher(opts *watchOpts) error -} - -// For nats.Context() support. -func (ctx ContextOpt) configureWatcher(opts *watchOpts) error { - opts.ctx = ctx - return nil -} - -type watchOpts struct { - ctx context.Context - // Do not send delete markers to the update channel. - ignoreDeletes bool - // Include all history per subject, not just last one. - includeHistory bool - // Include only updates for keys. - updatesOnly bool - // retrieve only the meta data of the entry - metaOnly bool -} - -type watchOptFn func(opts *watchOpts) error - -func (opt watchOptFn) configureWatcher(opts *watchOpts) error { - return opt(opts) -} - -// IncludeHistory instructs the key watcher to include historical values as well. -func IncludeHistory() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - if opts.updatesOnly { - return errors.New("nats: include history can not be used with updates only") - } - opts.includeHistory = true - return nil - }) -} - -// UpdatesOnly instructs the key watcher to only include updates on values (without latest values when started). -func UpdatesOnly() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - if opts.includeHistory { - return errors.New("nats: updates only can not be used with include history") - } - opts.updatesOnly = true - return nil - }) -} - -// IgnoreDeletes will have the key watcher not pass any deleted keys. 
-func IgnoreDeletes() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.ignoreDeletes = true - return nil - }) -} - -// MetaOnly instructs the key watcher to retrieve only the entry meta data, not the entry value -func MetaOnly() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.metaOnly = true - return nil - }) -} - -type PurgeOpt interface { - configurePurge(opts *purgeOpts) error -} - -type purgeOpts struct { - dmthr time.Duration // Delete markers threshold - ctx context.Context -} - -// DeleteMarkersOlderThan indicates that delete or purge markers older than that -// will be deleted as part of PurgeDeletes() operation, otherwise, only the data -// will be removed but markers that are recent will be kept. -// Note that if no option is specified, the default is 30 minutes. You can set -// this option to a negative value to instruct to always remove the markers, -// regardless of their age. -type DeleteMarkersOlderThan time.Duration - -func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { - opts.dmthr = time.Duration(ttl) - return nil -} - -// For nats.Context() support. -func (ctx ContextOpt) configurePurge(opts *purgeOpts) error { - opts.ctx = ctx - return nil -} - -type DeleteOpt interface { - configureDelete(opts *deleteOpts) error -} - -type deleteOpts struct { - // Remove all previous revisions. - purge bool - - // Delete only if the latest revision matches. - revision uint64 -} - -type deleteOptFn func(opts *deleteOpts) error - -func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { - return opt(opts) -} - -// LastRevision deletes if the latest revision matches. -func LastRevision(revision uint64) DeleteOpt { - return deleteOptFn(func(opts *deleteOpts) error { - opts.revision = revision - return nil - }) -} - -// purge removes all previous revisions. 
-func purge() DeleteOpt { - return deleteOptFn(func(opts *deleteOpts) error { - opts.purge = true - return nil - }) -} - -// KeyValueConfig is for configuring a KeyValue store. -type KeyValueConfig struct { - Bucket string - Description string - MaxValueSize int32 - History uint8 - TTL time.Duration - MaxBytes int64 - Storage StorageType - Replicas int - Placement *Placement - RePublish *RePublish - Mirror *StreamSource - Sources []*StreamSource -} - -// Used to watch all keys. -const ( - KeyValueMaxHistory = 64 - AllKeys = ">" - kvLatestRevision = 0 - kvop = "KV-Operation" - kvdel = "DEL" - kvpurge = "PURGE" -) - -type KeyValueOp uint8 - -const ( - KeyValuePut KeyValueOp = iota - KeyValueDelete - KeyValuePurge -) - -func (op KeyValueOp) String() string { - switch op { - case KeyValuePut: - return "KeyValuePutOp" - case KeyValueDelete: - return "KeyValueDeleteOp" - case KeyValuePurge: - return "KeyValuePurgeOp" - default: - return "Unknown Operation" - } -} - -// KeyValueEntry is a retrieved entry for Get or List or Watch. -type KeyValueEntry interface { - // Bucket is the bucket the data was loaded from. - Bucket() string - // Key is the key that was retrieved. - Key() string - // Value is the retrieved value. - Value() []byte - // Revision is a unique sequence for this value. - Revision() uint64 - // Created is the time the data was put in the bucket. - Created() time.Time - // Delta is distance from the latest value. - Delta() uint64 - // Operation returns Put or Delete or Purge. 
- Operation() KeyValueOp -} - -// Errors -var ( - ErrKeyValueConfigRequired = errors.New("nats: config required") - ErrInvalidBucketName = errors.New("nats: invalid bucket name") - ErrInvalidKey = errors.New("nats: invalid key") - ErrBucketNotFound = errors.New("nats: bucket not found") - ErrBadBucket = errors.New("nats: bucket not valid key-value store") - ErrKeyNotFound = errors.New("nats: key not found") - ErrKeyDeleted = errors.New("nats: key was deleted") - ErrHistoryToLarge = errors.New("nats: history limited to a max of 64") - ErrNoKeysFound = errors.New("nats: no keys found") -) - -var ( - ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} -) - -const ( - kvBucketNamePre = "KV_" - kvBucketNameTmpl = "KV_%s" - kvSubjectsTmpl = "$KV.%s.>" - kvSubjectsPreTmpl = "$KV.%s." - kvSubjectsPreDomainTmpl = "%s.$KV.%s." - kvNoPending = "0" -) - -// Regex for valid keys and buckets. -var ( - validBucketRe = regexp.MustCompile(`\A[a-zA-Z0-9_-]+\z`) - validKeyRe = regexp.MustCompile(`\A[-/_=\.a-zA-Z0-9]+\z`) -) - -// KeyValue will lookup and bind to an existing KeyValue store. -func (js *js) KeyValue(bucket string) (KeyValue, error) { - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, errors.New("nats: key-value requires at least server version 2.6.2") - } - if !validBucketRe.MatchString(bucket) { - return nil, ErrInvalidBucketName - } - stream := fmt.Sprintf(kvBucketNameTmpl, bucket) - si, err := js.StreamInfo(stream) - if err != nil { - if err == ErrStreamNotFound { - err = ErrBucketNotFound - } - return nil, err - } - // Do some quick sanity checks that this is a correctly formed stream for KV. - // Max msgs per subject should be > 0. - if si.Config.MaxMsgsPerSubject < 1 { - return nil, ErrBadBucket - } - - return mapStreamToKVS(js, si), nil -} - -// CreateKeyValue will create a KeyValue store with the following configuration. 
-func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, errors.New("nats: key-value requires at least server version 2.6.2") - } - if cfg == nil { - return nil, ErrKeyValueConfigRequired - } - if !validBucketRe.MatchString(cfg.Bucket) { - return nil, ErrInvalidBucketName - } - if _, err := js.AccountInfo(); err != nil { - return nil, err - } - - // Default to 1 for history. Max is 64 for now. - history := int64(1) - if cfg.History > 0 { - if cfg.History > KeyValueMaxHistory { - return nil, ErrHistoryToLarge - } - history = int64(cfg.History) - } - - replicas := cfg.Replicas - if replicas == 0 { - replicas = 1 - } - - // We will set explicitly some values so that we can do comparison - // if we get an "already in use" error and need to check if it is same. - maxBytes := cfg.MaxBytes - if maxBytes == 0 { - maxBytes = -1 - } - maxMsgSize := cfg.MaxValueSize - if maxMsgSize == 0 { - maxMsgSize = -1 - } - // When stream's MaxAge is not set, server uses 2 minutes as the default - // for the duplicate window. If MaxAge is set, and lower than 2 minutes, - // then the duplicate window will be set to that. If MaxAge is greater, - // we will cap the duplicate window to 2 minutes (to be consistent with - // previous behavior). - duplicateWindow := 2 * time.Minute - if cfg.TTL > 0 && cfg.TTL < duplicateWindow { - duplicateWindow = cfg.TTL - } - scfg := &StreamConfig{ - Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), - Description: cfg.Description, - MaxMsgsPerSubject: history, - MaxBytes: maxBytes, - MaxAge: cfg.TTL, - MaxMsgSize: maxMsgSize, - Storage: cfg.Storage, - Replicas: replicas, - Placement: cfg.Placement, - AllowRollup: true, - DenyDelete: true, - Duplicates: duplicateWindow, - MaxMsgs: -1, - MaxConsumers: -1, - AllowDirect: true, - RePublish: cfg.RePublish, - } - if cfg.Mirror != nil { - // Copy in case we need to make changes so we do not change caller's version. 
- m := cfg.Mirror.copy() - if !strings.HasPrefix(m.Name, kvBucketNamePre) { - m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) - } - scfg.Mirror = m - scfg.MirrorDirect = true - } else if len(cfg.Sources) > 0 { - for _, ss := range cfg.Sources { - var sourceBucketName string - if strings.HasPrefix(ss.Name, kvBucketNamePre) { - sourceBucketName = ss.Name[len(kvBucketNamePre):] - } else { - sourceBucketName = ss.Name - ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) - } - - if ss.External == nil || sourceBucketName != cfg.Bucket { - ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} - } - scfg.Sources = append(scfg.Sources, ss) - } - scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} - } else { - scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} - } - - // If we are at server version 2.7.2 or above use DiscardNew. We can not use DiscardNew for 2.7.1 or below. - if js.nc.serverMinVersion(2, 7, 2) { - scfg.Discard = DiscardNew - } - - si, err := js.AddStream(scfg) - if err != nil { - // If we have a failure to add, it could be because we have - // a config change if the KV was created against a pre 2.7.2 - // and we are now moving to a v2.7.2+. If that is the case - // and the only difference is the discard policy, then update - // the stream. - // The same logic applies for KVs created pre 2.9.x and - // the AllowDirect setting. - if err == ErrStreamNameAlreadyInUse { - if si, _ = js.StreamInfo(scfg.Name); si != nil { - // To compare, make the server's stream info discard - // policy same than ours. 
- si.Config.Discard = scfg.Discard - // Also need to set allow direct for v2.9.x+ - si.Config.AllowDirect = scfg.AllowDirect - if reflect.DeepEqual(&si.Config, scfg) { - si, err = js.UpdateStream(scfg) - } - } - } - if err != nil { - return nil, err - } - } - return mapStreamToKVS(js, si), nil -} - -// DeleteKeyValue will delete this KeyValue store (JetStream stream). -func (js *js) DeleteKeyValue(bucket string) error { - if !validBucketRe.MatchString(bucket) { - return ErrInvalidBucketName - } - stream := fmt.Sprintf(kvBucketNameTmpl, bucket) - return js.DeleteStream(stream) -} - -type kvs struct { - name string - stream string - pre string - putPre string - js *js - // If true, it means that APIPrefix/Domain was set in the context - // and we need to add something to some of our high level protocols - // (such as Put, etc..) - useJSPfx bool - // To know if we can use the stream direct get API - useDirect bool -} - -// Underlying entry. -type kve struct { - bucket string - key string - value []byte - revision uint64 - delta uint64 - created time.Time - op KeyValueOp -} - -func (e *kve) Bucket() string { return e.bucket } -func (e *kve) Key() string { return e.key } -func (e *kve) Value() []byte { return e.value } -func (e *kve) Revision() uint64 { return e.revision } -func (e *kve) Created() time.Time { return e.created } -func (e *kve) Delta() uint64 { return e.delta } -func (e *kve) Operation() KeyValueOp { return e.op } - -func keyValid(key string) bool { - if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { - return false - } - return validKeyRe.MatchString(key) -} - -// Get returns the latest value for the key. -func (kv *kvs) Get(key string) (KeyValueEntry, error) { - e, err := kv.get(key, kvLatestRevision) - if err != nil { - if err == ErrKeyDeleted { - return nil, ErrKeyNotFound - } - return nil, err - } - - return e, nil -} - -// GetRevision returns a specific revision value for the key. 
-func (kv *kvs) GetRevision(key string, revision uint64) (KeyValueEntry, error) { - e, err := kv.get(key, revision) - if err != nil { - if err == ErrKeyDeleted { - return nil, ErrKeyNotFound - } - return nil, err - } - - return e, nil -} - -func (kv *kvs) get(key string, revision uint64) (KeyValueEntry, error) { - if !keyValid(key) { - return nil, ErrInvalidKey - } - - var b strings.Builder - b.WriteString(kv.pre) - b.WriteString(key) - - var m *RawStreamMsg - var err error - var _opts [1]JSOpt - opts := _opts[:0] - if kv.useDirect { - opts = append(opts, DirectGet()) - } - - if revision == kvLatestRevision { - m, err = kv.js.GetLastMsg(kv.stream, b.String(), opts...) - } else { - m, err = kv.js.GetMsg(kv.stream, revision, opts...) - // If a sequence was provided, just make sure that the retrieved - // message subject matches the request. - if err == nil && m.Subject != b.String() { - return nil, ErrKeyNotFound - } - } - if err != nil { - if err == ErrMsgNotFound { - err = ErrKeyNotFound - } - return nil, err - } - - entry := &kve{ - bucket: kv.name, - key: key, - value: m.Data, - revision: m.Sequence, - created: m.Time, - } - - // Double check here that this is not a DEL Operation marker. - if len(m.Header) > 0 { - switch m.Header.Get(kvop) { - case kvdel: - entry.op = KeyValueDelete - return entry, ErrKeyDeleted - case kvpurge: - entry.op = KeyValuePurge - return entry, ErrKeyDeleted - } - } - - return entry, nil -} - -// Put will place the new value for the key into the store. 
-func (kv *kvs) Put(key string, value []byte) (revision uint64, err error) { - if !keyValid(key) { - return 0, ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.opts.pre) - } - if kv.putPre != _EMPTY_ { - b.WriteString(kv.putPre) - } else { - b.WriteString(kv.pre) - } - b.WriteString(key) - - pa, err := kv.js.Publish(b.String(), value) - if err != nil { - return 0, err - } - return pa.Sequence, err -} - -// PutString will place the string for the key into the store. -func (kv *kvs) PutString(key string, value string) (revision uint64, err error) { - return kv.Put(key, []byte(value)) -} - -// Create will add the key/value pair if it does not exist. -func (kv *kvs) Create(key string, value []byte) (revision uint64, err error) { - v, err := kv.Update(key, value, 0) - if err == nil { - return v, nil - } - - // TODO(dlc) - Since we have tombstones for DEL ops for watchers, this could be from that - // so we need to double check. - if e, err := kv.get(key, kvLatestRevision); err == ErrKeyDeleted { - return kv.Update(key, value, e.Revision()) - } - - // Check if the expected last subject sequence is not zero which implies - // the key already exists. - if errors.Is(err, ErrKeyExists) { - jserr := ErrKeyExists.(*jsError) - return 0, fmt.Errorf("%w: %s", err, jserr.message) - } - - return 0, err -} - -// Update will update the value if the latest revision matches. -func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) { - if !keyValid(key) { - return 0, ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.opts.pre) - } - b.WriteString(kv.pre) - b.WriteString(key) - - m := Msg{Subject: b.String(), Header: Header{}, Data: value} - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(revision, 10)) - - pa, err := kv.js.PublishMsg(&m) - if err != nil { - return 0, err - } - return pa.Sequence, err -} - -// Delete will place a delete marker and leave all revisions. 
-func (kv *kvs) Delete(key string, opts ...DeleteOpt) error { - if !keyValid(key) { - return ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.opts.pre) - } - if kv.putPre != _EMPTY_ { - b.WriteString(kv.putPre) - } else { - b.WriteString(kv.pre) - } - b.WriteString(key) - - // DEL op marker. For watch functionality. - m := NewMsg(b.String()) - - var o deleteOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureDelete(&o); err != nil { - return err - } - } - } - - if o.purge { - m.Header.Set(kvop, kvpurge) - m.Header.Set(MsgRollup, MsgRollupSubject) - } else { - m.Header.Set(kvop, kvdel) - } - - if o.revision != 0 { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10)) - } - - _, err := kv.js.PublishMsg(m) - return err -} - -// Purge will remove the key and all revisions. -func (kv *kvs) Purge(key string, opts ...DeleteOpt) error { - return kv.Delete(key, append(opts, purge())...) -} - -const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute - -// PurgeDeletes will remove all current delete markers. -// This is a maintenance option if there is a larger buildup of delete markers. -// See DeleteMarkersOlderThan() option for more information. -func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error { - var o purgeOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configurePurge(&o); err != nil { - return err - } - } - } - // Transfer possible context purge option to the watcher. This is the - // only option that matters for the PurgeDeletes() feature. - var wopts []WatchOpt - if o.ctx != nil { - wopts = append(wopts, Context(o.ctx)) - } - watcher, err := kv.WatchAll(wopts...) - if err != nil { - return err - } - defer watcher.Stop() - - var limit time.Time - olderThan := o.dmthr - // Negative value is used to instruct to always remove markers, regardless - // of age. If set to 0 (or not set), use our default value. 
- if olderThan == 0 { - olderThan = kvDefaultPurgeDeletesMarkerThreshold - } - if olderThan > 0 { - limit = time.Now().Add(-olderThan) - } - - var deleteMarkers []KeyValueEntry - for entry := range watcher.Updates() { - if entry == nil { - break - } - if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge { - deleteMarkers = append(deleteMarkers, entry) - } - } - - var ( - pr StreamPurgeRequest - b strings.Builder - ) - // Do actual purges here. - for _, entry := range deleteMarkers { - b.WriteString(kv.pre) - b.WriteString(entry.Key()) - pr.Subject = b.String() - pr.Keep = 0 - if olderThan > 0 && entry.Created().After(limit) { - pr.Keep = 1 - } - if err := kv.js.purgeStream(kv.stream, &pr); err != nil { - return err - } - b.Reset() - } - return nil -} - -// Keys() will return all keys. -func (kv *kvs) Keys(opts ...WatchOpt) ([]string, error) { - opts = append(opts, IgnoreDeletes(), MetaOnly()) - watcher, err := kv.WatchAll(opts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - - var keys []string - for entry := range watcher.Updates() { - if entry == nil { - break - } - keys = append(keys, entry.Key()) - } - if len(keys) == 0 { - return nil, ErrNoKeysFound - } - return keys, nil -} - -// History will return all values for the key. -func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) { - opts = append(opts, IncludeHistory()) - watcher, err := kv.Watch(key, opts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - - var entries []KeyValueEntry - for entry := range watcher.Updates() { - if entry == nil { - break - } - entries = append(entries, entry) - } - if len(entries) == 0 { - return nil, ErrKeyNotFound - } - return entries, nil -} - -// Implementation for Watch -type watcher struct { - mu sync.Mutex - updates chan KeyValueEntry - sub *Subscription - initDone bool - initPending uint64 - received uint64 - ctx context.Context -} - -// Context returns the context for the watcher if set. 
-func (w *watcher) Context() context.Context { - if w == nil { - return nil - } - return w.ctx -} - -// Updates returns the interior channel. -func (w *watcher) Updates() <-chan KeyValueEntry { - if w == nil { - return nil - } - return w.updates -} - -// Stop will unsubscribe from the watcher. -func (w *watcher) Stop() error { - if w == nil { - return nil - } - return w.sub.Unsubscribe() -} - -// WatchAll watches all keys. -func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) { - return kv.Watch(AllKeys, opts...) -} - -// Watch will fire the callback when a key that matches the keys pattern is updated. -// keys needs to be a valid NATS subject. -func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) { - var o watchOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureWatcher(&o); err != nil { - return nil, err - } - } - } - - // Could be a pattern so don't check for validity as we normally do. - var b strings.Builder - b.WriteString(kv.pre) - b.WriteString(keys) - keys = b.String() - - // We will block below on placing items on the chan. That is by design. 
- w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx} - - update := func(m *Msg) { - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return - } - if len(m.Subject) <= len(kv.pre) { - return - } - subj := m.Subject[len(kv.pre):] - - var op KeyValueOp - if len(m.Header) > 0 { - switch m.Header.Get(kvop) { - case kvdel: - op = KeyValueDelete - case kvpurge: - op = KeyValuePurge - } - } - delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos]) - w.mu.Lock() - defer w.mu.Unlock() - if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) { - entry := &kve{ - bucket: kv.name, - key: subj, - value: m.Data, - revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), - created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), - delta: delta, - op: op, - } - w.updates <- entry - } - // Check if done and initial values. - // Skip if UpdatesOnly() is set, since there will never be updates initially. - if !w.initDone { - w.received++ - // We set this on the first trip through.. - if w.initPending == 0 { - w.initPending = delta - } - if w.received > w.initPending || delta == 0 { - w.initDone = true - w.updates <- nil - } - } - } - - // Used ordered consumer to deliver results. - subOpts := []SubOpt{BindStream(kv.stream), OrderedConsumer()} - if !o.includeHistory { - subOpts = append(subOpts, DeliverLastPerSubject()) - } - if o.updatesOnly { - subOpts = append(subOpts, DeliverNew()) - } - if o.metaOnly { - subOpts = append(subOpts, HeadersOnly()) - } - if o.ctx != nil { - subOpts = append(subOpts, Context(o.ctx)) - } - // Create the sub and rest of initialization under the lock. - // We want to prevent the race between this code and the - // update() callback. - w.mu.Lock() - defer w.mu.Unlock() - sub, err := kv.js.Subscribe(keys, update, subOpts...) 
- if err != nil { - return nil, err - } - sub.mu.Lock() - // If there were no pending messages at the time of the creation - // of the consumer, send the marker. - // Skip if UpdatesOnly() is set, since there will never be updates initially. - if !o.updatesOnly { - if sub.jsi != nil && sub.jsi.pending == 0 { - w.initDone = true - w.updates <- nil - } - } else { - // if UpdatesOnly was used, mark initialization as complete - w.initDone = true - } - // Set us up to close when the waitForMessages func returns. - sub.pDone = func(_ string) { - close(w.updates) - } - sub.mu.Unlock() - - w.sub = sub - return w, nil -} - -// Bucket returns the current bucket name (JetStream stream). -func (kv *kvs) Bucket() string { - return kv.name -} - -// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus -type KeyValueBucketStatus struct { - nfo *StreamInfo - bucket string -} - -// Bucket the name of the bucket -func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } - -// Values is how many messages are in the bucket, including historical values -func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs } - -// History returns the configured history kept per key -func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject } - -// TTL is how long the bucket keeps values for -func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } - -// BackingStore indicates what technology is used for storage of the bucket -func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } - -// StreamInfo is the stream info retrieved to create the status -func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo } - -// Bytes is the size of the stream -func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes } - -// Status retrieves the status and configuration of a bucket -func (kv *kvs) Status() (KeyValueStatus, error) { - nfo, err := 
kv.js.StreamInfo(kv.stream) - if err != nil { - return nil, err - } - - return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil -} - -// KeyValueStoreNames is used to retrieve a list of key value store names -func (js *js) KeyValueStoreNames() <-chan string { - ch := make(chan string) - l := &streamNamesLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") - go func() { - defer close(ch) - for l.Next() { - for _, name := range l.Page() { - if !strings.HasPrefix(name, kvBucketNamePre) { - continue - } - ch <- name - } - } - }() - - return ch -} - -// KeyValueStores is used to retrieve a list of key value store statuses -func (js *js) KeyValueStores() <-chan KeyValueStatus { - ch := make(chan KeyValueStatus) - l := &streamLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") - go func() { - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { - continue - } - ch <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} - } - } - }() - return ch -} - -func mapStreamToKVS(js *js, info *StreamInfo) *kvs { - bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre) - - kv := &kvs{ - name: bucket, - stream: info.Config.Name, - pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket), - js: js, - // Determine if we need to use the JS prefix in front of Put and Delete operations - useJSPfx: js.opts.pre != defaultAPIPrefix, - useDirect: info.Config.AllowDirect, - } - - // If we are mirroring, we will have mirror direct on, so just use the mirror name - // and override use - if m := info.Config.Mirror; m != nil { - bucket := strings.TrimPrefix(m.Name, kvBucketNamePre) - if m.External != nil && m.External.APIPrefix != _EMPTY_ { - kv.useJSPfx = false - kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) - kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket) - } else { - kv.putPre = 
fmt.Sprintf(kvSubjectsPreTmpl, bucket) - } - } - - return kv -} diff --git a/vendor/github.com/nats-io/nats.go/legacy_jetstream.md b/vendor/github.com/nats-io/nats.go/legacy_jetstream.md deleted file mode 100644 index 43e1c73b..00000000 --- a/vendor/github.com/nats-io/nats.go/legacy_jetstream.md +++ /dev/null @@ -1,83 +0,0 @@ -# Legacy JetStream API - -This is a documentation for the legacy JetStream API. A README for the current -API can be found [here](jetstream/README.md) - -## JetStream Basic Usage - -```go -import "github.com/nats-io/nats.go" - -// Connect to NATS -nc, _ := nats.Connect(nats.DefaultURL) - -// Create JetStream Context -js, _ := nc.JetStream(nats.PublishAsyncMaxPending(256)) - -// Simple Stream Publisher -js.Publish("ORDERS.scratch", []byte("hello")) - -// Simple Async Stream Publisher -for i := 0; i < 500; i++ { - js.PublishAsync("ORDERS.scratch", []byte("hello")) -} -select { -case <-js.PublishAsyncComplete(): -case <-time.After(5 * time.Second): - fmt.Println("Did not resolve in time") -} - -// Simple Async Ephemeral Consumer -js.Subscribe("ORDERS.*", func(m *nats.Msg) { - fmt.Printf("Received a JetStream message: %s\n", string(m.Data)) -}) - -// Simple Sync Durable Consumer (optional SubOpts at the end) -sub, err := js.SubscribeSync("ORDERS.*", nats.Durable("MONITOR"), nats.MaxDeliver(3)) -m, err := sub.NextMsg(timeout) - -// Simple Pull Consumer -sub, err := js.PullSubscribe("ORDERS.*", "MONITOR") -msgs, err := sub.Fetch(10) - -// Unsubscribe -sub.Unsubscribe() - -// Drain -sub.Drain() -``` - -## JetStream Basic Management - -```go -import "github.com/nats-io/nats.go" - -// Connect to NATS -nc, _ := nats.Connect(nats.DefaultURL) - -// Create JetStream Context -js, _ := nc.JetStream() - -// Create a Stream -js.AddStream(&nats.StreamConfig{ - Name: "ORDERS", - Subjects: []string{"ORDERS.*"}, -}) - -// Update a Stream -js.UpdateStream(&nats.StreamConfig{ - Name: "ORDERS", - MaxBytes: 8, -}) - -// Create a Consumer -js.AddConsumer("ORDERS", 
&nats.ConsumerConfig{ - Durable: "MONITOR", -}) - -// Delete Consumer -js.DeleteConsumer("ORDERS", "MONITOR") - -// Delete Stream -js.DeleteStream("ORDERS") -``` diff --git a/vendor/github.com/nats-io/nats.go/nats.go b/vendor/github.com/nats-io/nats.go/nats.go deleted file mode 100644 index 82b79730..00000000 --- a/vendor/github.com/nats-io/nats.go/nats.go +++ /dev/null @@ -1,5673 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A Go client for the NATS messaging system (https://nats.io). 
-package nats - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math/rand" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/nats-io/nkeys" - "github.com/nats-io/nuid" - - "github.com/nats-io/nats.go/util" -) - -// Default Constants -const ( - Version = "1.30.2" - DefaultURL = "nats://127.0.0.1:4222" - DefaultPort = 4222 - DefaultMaxReconnect = 60 - DefaultReconnectWait = 2 * time.Second - DefaultReconnectJitter = 100 * time.Millisecond - DefaultReconnectJitterTLS = time.Second - DefaultTimeout = 2 * time.Second - DefaultPingInterval = 2 * time.Minute - DefaultMaxPingOut = 2 - DefaultMaxChanLen = 64 * 1024 // 64k - DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB - RequestChanLen = 8 - DefaultDrainTimeout = 30 * time.Second - DefaultFlusherTimeout = time.Minute - LangString = "go" -) - -const ( - // STALE_CONNECTION is for detection and proper handling of stale connections. - STALE_CONNECTION = "stale connection" - - // PERMISSIONS_ERR is for when nats server subject authorization has failed. - PERMISSIONS_ERR = "permissions violation" - - // AUTHORIZATION_ERR is for when nats server user authorization has failed. - AUTHORIZATION_ERR = "authorization violation" - - // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired. - AUTHENTICATION_EXPIRED_ERR = "user authentication expired" - - // AUTHENTICATION_REVOKED_ERR is for when user authorization has been revoked. - AUTHENTICATION_REVOKED_ERR = "user authentication revoked" - - // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired. 
- ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired" - - // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit - MAX_CONNECTIONS_ERR = "maximum connections exceeded" -) - -// Errors -var ( - ErrConnectionClosed = errors.New("nats: connection closed") - ErrConnectionDraining = errors.New("nats: connection draining") - ErrDrainTimeout = errors.New("nats: draining connection timed out") - ErrConnectionReconnecting = errors.New("nats: connection reconnecting") - ErrSecureConnRequired = errors.New("nats: secure connection required") - ErrSecureConnWanted = errors.New("nats: secure connection not available") - ErrBadSubscription = errors.New("nats: invalid subscription") - ErrTypeSubscription = errors.New("nats: invalid subscription type") - ErrBadSubject = errors.New("nats: invalid subject") - ErrBadQueueName = errors.New("nats: invalid queue name") - ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") - ErrTimeout = errors.New("nats: timeout") - ErrBadTimeout = errors.New("nats: timeout invalid") - ErrAuthorization = errors.New("nats: authorization violation") - ErrAuthExpired = errors.New("nats: authentication expired") - ErrAuthRevoked = errors.New("nats: authentication revoked") - ErrAccountAuthExpired = errors.New("nats: account authentication expired") - ErrNoServers = errors.New("nats: no servers available for connection") - ErrJsonParse = errors.New("nats: connect message, json parse error") - ErrChanArg = errors.New("nats: argument needs to be a channel type") - ErrMaxPayload = errors.New("nats: maximum payload exceeded") - ErrMaxMessages = errors.New("nats: maximum messages delivered") - ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") - ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") - ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") - ErrReconnectBufExceeded = errors.New("nats: 
outbound buffer limit exceeded") - ErrInvalidConnection = errors.New("nats: invalid connection") - ErrInvalidMsg = errors.New("nats: invalid message or message nil") - ErrInvalidArg = errors.New("nats: invalid argument") - ErrInvalidContext = errors.New("nats: invalid context") - ErrNoDeadlineContext = errors.New("nats: context requires a deadline") - ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server") - ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server") - ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler") - ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler") - ErrNoUserCB = errors.New("nats: user callback not defined") - ErrNkeyAndUser = errors.New("nats: user callback and nkey defined") - ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server") - ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) - ErrTokenAlreadySet = errors.New("nats: token and token handler both set") - ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection") - ErrMsgNoReply = errors.New("nats: message does not have a reply") - ErrClientIPNotSupported = errors.New("nats: client IP not supported by this server") - ErrDisconnected = errors.New("nats: server is disconnected") - ErrHeadersNotSupported = errors.New("nats: headers not supported by this server") - ErrBadHeaderMsg = errors.New("nats: message could not decode headers") - ErrNoResponders = errors.New("nats: no responders available for request") - ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded") - ErrConnectionNotTLS = errors.New("nats: connection is not tls") -) - -// GetDefaultOptions returns default configuration options for the client. 
-func GetDefaultOptions() Options { - return Options{ - AllowReconnect: true, - MaxReconnect: DefaultMaxReconnect, - ReconnectWait: DefaultReconnectWait, - ReconnectJitter: DefaultReconnectJitter, - ReconnectJitterTLS: DefaultReconnectJitterTLS, - Timeout: DefaultTimeout, - PingInterval: DefaultPingInterval, - MaxPingsOut: DefaultMaxPingOut, - SubChanLen: DefaultMaxChanLen, - ReconnectBufSize: DefaultReconnectBufSize, - DrainTimeout: DefaultDrainTimeout, - FlusherTimeout: DefaultFlusherTimeout, - } -} - -// DEPRECATED: Use GetDefaultOptions() instead. -// DefaultOptions is not safe for use by multiple clients. -// For details see #308. -var DefaultOptions = GetDefaultOptions() - -// Status represents the state of the connection. -type Status int - -const ( - DISCONNECTED = Status(iota) - CONNECTED - CLOSED - RECONNECTING - CONNECTING - DRAINING_SUBS - DRAINING_PUBS -) - -func (s Status) String() string { - switch s { - case DISCONNECTED: - return "DISCONNECTED" - case CONNECTED: - return "CONNECTED" - case CLOSED: - return "CLOSED" - case RECONNECTING: - return "RECONNECTING" - case CONNECTING: - return "CONNECTING" - case DRAINING_SUBS: - return "DRAINING_SUBS" - case DRAINING_PUBS: - return "DRAINING_PUBS" - } - return "unknown status" -} - -// ConnHandler is used for asynchronous events such as -// disconnected and closed connections. -type ConnHandler func(*Conn) - -// ConnErrHandler is used to process asynchronous events like -// disconnected connection with the error (if any). -type ConnErrHandler func(*Conn, error) - -// ErrHandler is used to process asynchronous errors encountered -// while processing inbound messages. -type ErrHandler func(*Conn, *Subscription, error) - -// UserJWTHandler is used to fetch and return the account signed -// JWT for this user. -type UserJWTHandler func() (string, error) - -// TLSCertHandler is used to fetch and return tls certificate. 
-type TLSCertHandler func() (tls.Certificate, error) - -// RootCAsHandler is used to fetch and return a set of root certificate -// authorities that clients use when verifying server certificates. -type RootCAsHandler func() (*x509.CertPool, error) - -// SignatureHandler is used to sign a nonce from the server while -// authenticating with nkeys. The user should sign the nonce and -// return the raw signature. The client will base64 encode this to -// send to the server. -type SignatureHandler func([]byte) ([]byte, error) - -// AuthTokenHandler is used to generate a new token. -type AuthTokenHandler func() string - -// ReconnectDelayHandler is used to get from the user the desired -// delay the library should pause before attempting to reconnect -// again. Note that this is invoked after the library tried the -// whole list of URLs and failed to reconnect. -type ReconnectDelayHandler func(attempts int) time.Duration - -// asyncCB is used to preserve order for async callbacks. -type asyncCB struct { - f func() - next *asyncCB -} - -type asyncCallbacksHandler struct { - mu sync.Mutex - cond *sync.Cond - head *asyncCB - tail *asyncCB -} - -// Option is a function on the options for a connection. -type Option func(*Options) error - -// CustomDialer can be used to specify any dialer, not necessarily a -// *net.Dialer. A CustomDialer may also implement `SkipTLSHandshake() bool` -// in order to skip the TLS handshake in case not required. -type CustomDialer interface { - Dial(network, address string) (net.Conn, error) -} - -type InProcessConnProvider interface { - InProcessConn() (net.Conn, error) -} - -// Options can be used to create a customized connection. -type Options struct { - - // Url represents a single NATS server url to which the client - // will be connecting. If the Servers option is also set, it - // then becomes the first server in the Servers array. - Url string - - // InProcessServer represents a NATS server running within the - // same process. 
If this is set then we will attempt to connect - // to the server directly rather than using external TCP conns. - InProcessServer InProcessConnProvider - - // Servers is a configured set of servers which this client - // will use when attempting to connect. - Servers []string - - // NoRandomize configures whether we will randomize the - // server pool. - NoRandomize bool - - // NoEcho configures whether the server will echo back messages - // that are sent on this connection if we also have matching subscriptions. - // Note this is supported on servers >= version 1.2. Proto 1 or greater. - NoEcho bool - - // Name is an optional name label which will be sent to the server - // on CONNECT to identify the client. - Name string - - // Verbose signals the server to send an OK ack for commands - // successfully processed by the server. - Verbose bool - - // Pedantic signals the server whether it should be doing further - // validation of subjects. - Pedantic bool - - // Secure enables TLS secure connections that skip server - // verification by default. NOT RECOMMENDED. - Secure bool - - // TLSConfig is a custom TLS configuration to use for secure - // transports. - TLSConfig *tls.Config - - // TLSCertCB is used to fetch and return custom tls certificate. - TLSCertCB TLSCertHandler - - // RootCAsCB is used to fetch and return a set of root certificate - // authorities that clients use when verifying server certificates. - RootCAsCB RootCAsHandler - - // AllowReconnect enables reconnection logic to be used when we - // encounter a disconnect from the current server. - AllowReconnect bool - - // MaxReconnect sets the number of reconnect attempts that will be - // tried before giving up. If negative, then it will never give up - // trying to reconnect. - // Defaults to 60. - MaxReconnect int - - // ReconnectWait sets the time to backoff after attempting a reconnect - // to a server that we were already connected to previously. - // Defaults to 2s. 
- ReconnectWait time.Duration - - // CustomReconnectDelayCB is invoked after the library tried every - // URL in the server list and failed to reconnect. It passes to the - // user the current number of attempts. This function returns the - // amount of time the library will sleep before attempting to reconnect - // again. It is strongly recommended that this value contains some - // jitter to prevent all connections to attempt reconnecting at the same time. - CustomReconnectDelayCB ReconnectDelayHandler - - // ReconnectJitter sets the upper bound for a random delay added to - // ReconnectWait during a reconnect when no TLS is used. - // Defaults to 100ms. - ReconnectJitter time.Duration - - // ReconnectJitterTLS sets the upper bound for a random delay added to - // ReconnectWait during a reconnect when TLS is used. - // Defaults to 1s. - ReconnectJitterTLS time.Duration - - // Timeout sets the timeout for a Dial operation on a connection. - // Defaults to 2s. - Timeout time.Duration - - // DrainTimeout sets the timeout for a Drain Operation to complete. - // Defaults to 30s. - DrainTimeout time.Duration - - // FlusherTimeout is the maximum time to wait for write operations - // to the underlying connection to complete (including the flusher loop). - // Defaults to 1m. - FlusherTimeout time.Duration - - // PingInterval is the period at which the client will be sending ping - // commands to the server, disabled if 0 or negative. - // Defaults to 2m. - PingInterval time.Duration - - // MaxPingsOut is the maximum number of pending ping commands that can - // be awaiting a response before raising an ErrStaleConnection error. - // Defaults to 2. - MaxPingsOut int - - // ClosedCB sets the closed handler that is called when a client will - // no longer be connected. - ClosedCB ConnHandler - - // DisconnectedCB sets the disconnected handler that is called - // whenever the connection is disconnected. - // Will not be called if DisconnectedErrCB is set - // DEPRECATED. 
Use DisconnectedErrCB which passes error that caused - // the disconnect event. - DisconnectedCB ConnHandler - - // DisconnectedErrCB sets the disconnected error handler that is called - // whenever the connection is disconnected. - // Disconnected error could be nil, for instance when user explicitly closes the connection. - // DisconnectedCB will not be called if DisconnectedErrCB is set - DisconnectedErrCB ConnErrHandler - - // ConnectedCB sets the connected handler called when the initial connection - // is established. It is not invoked on successful reconnects - for reconnections, - // use ReconnectedCB. ConnectedCB can be used in conjunction with RetryOnFailedConnect - // to detect whether the initial connect was successful. - ConnectedCB ConnHandler - - // ReconnectedCB sets the reconnected handler called whenever - // the connection is successfully reconnected. - ReconnectedCB ConnHandler - - // DiscoveredServersCB sets the callback that is invoked whenever a new - // server has joined the cluster. - DiscoveredServersCB ConnHandler - - // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) - AsyncErrorCB ErrHandler - - // ReconnectBufSize is the size of the backing bufio during reconnect. - // Once this has been exhausted publish operations will return an error. - // Defaults to 8388608 bytes (8MB). - ReconnectBufSize int - - // SubChanLen is the size of the buffered channel used between the socket - // Go routine and the message delivery for SyncSubscriptions. - // NOTE: This does not affect AsyncSubscriptions which are - // dictated by PendingLimits() - // Defaults to 65536. - SubChanLen int - - // UserJWT sets the callback handler that will fetch a user's JWT. - UserJWT UserJWTHandler - - // Nkey sets the public nkey that will be used to authenticate - // when connecting to the server. UserJWT and Nkey are mutually exclusive - // and if defined, UserJWT will take precedence. 
- Nkey string - - // SignatureCB designates the function used to sign the nonce - // presented from the server. - SignatureCB SignatureHandler - - // User sets the username to be used when connecting to the server. - User string - - // Password sets the password to be used when connecting to a server. - Password string - - // Token sets the token to be used when connecting to a server. - Token string - - // TokenHandler designates the function used to generate the token to be used when connecting to a server. - TokenHandler AuthTokenHandler - - // Dialer allows a custom net.Dialer when forming connections. - // DEPRECATED: should use CustomDialer instead. - Dialer *net.Dialer - - // CustomDialer allows to specify a custom dialer (not necessarily - // a *net.Dialer). - CustomDialer CustomDialer - - // UseOldRequestStyle forces the old method of Requests that utilize - // a new Inbox and a new Subscription for each request. - UseOldRequestStyle bool - - // NoCallbacksAfterClientClose allows preventing the invocation of - // callbacks after Close() is called. Client won't receive notifications - // when Close is invoked by user code. Default is to invoke the callbacks. - NoCallbacksAfterClientClose bool - - // LameDuckModeHandler sets the callback to invoke when the server notifies - // the connection that it entered lame duck mode, that is, going to - // gradually disconnect all its connections before shutting down. This is - // often used in deployments when upgrading NATS Servers. - LameDuckModeHandler ConnHandler - - // RetryOnFailedConnect sets the connection in reconnecting state right - // away if it can't connect to a server in the initial set. The - // MaxReconnect and ReconnectWait options are used for this process, - // similarly to when an established connection is disconnected. 
- // If a ReconnectHandler is set, it will be invoked on the first - // successful reconnect attempt (if the initial connect fails), - // and if a ClosedHandler is set, it will be invoked if - // it fails to connect (after exhausting the MaxReconnect attempts). - RetryOnFailedConnect bool - - // For websocket connections, indicates to the server that the connection - // supports compression. If the server does too, then data will be compressed. - Compression bool - - // For websocket connections, adds a path to connections url. - // This is useful when connecting to NATS behind a proxy. - ProxyPath string - - // InboxPrefix allows the default _INBOX prefix to be customized - InboxPrefix string - - // IgnoreAuthErrorAbort - if set to true, client opts out of the default connect behavior of aborting - // subsequent reconnect attempts if server returns the same auth error twice (regardless of reconnect policy). - IgnoreAuthErrorAbort bool - - // SkipHostLookup skips the DNS lookup for the server hostname. - SkipHostLookup bool -} - -const ( - // Scratch storage for assembling protocol headers - scratchSize = 512 - - // The size of the bufio reader/writer on top of the socket. - defaultBufSize = 32768 - - // The buffered size of the flush "kick" channel - flushChanSize = 1 - - // Default server pool size - srvPoolSize = 4 - - // NUID size - nuidSize = 22 - - // Default ports used if none is specified in given URL(s) - defaultWSPortString = "80" - defaultWSSPortString = "443" - defaultPortString = "4222" -) - -// A Conn represents a bare connection to a nats-server. -// It can send and receive []byte payloads. -// The connection is safe to use in multiple Go routines concurrently. -type Conn struct { - // Keep all members for which we use atomic at the beginning of the - // struct and make sure they are all 64bits (or use padding if necessary). - // atomic.* functions crash on 32bit machines if operand is not aligned - // at 64bit. 
See https://github.com/golang/go/issues/599 - Statistics - mu sync.RWMutex - // Opts holds the configuration of the Conn. - // Modifying the configuration of a running Conn is a race. - Opts Options - wg sync.WaitGroup - srvPool []*srv - current *srv - urls map[string]struct{} // Keep track of all known URLs (used by processInfo) - conn net.Conn - bw *natsWriter - br *natsReader - fch chan struct{} - info serverInfo - ssid int64 - subsMu sync.RWMutex - subs map[int64]*Subscription - ach *asyncCallbacksHandler - pongs []chan struct{} - scratch [scratchSize]byte - status Status - statListeners map[Status][]chan Status - initc bool // true if the connection is performing the initial connect - err error - ps *parseState - ptmr *time.Timer - pout int - ar bool // abort reconnect - rqch chan struct{} - ws bool // true if a websocket connection - - // New style response handler - respSub string // The wildcard subject - respSubPrefix string // the wildcard prefix including trailing . - respSubLen int // the length of the wildcard prefix excluding trailing . - respScanf string // The scanf template to extract mux token - respMux *Subscription // A single response subscription - respMap map[string]chan *Msg // Request map for the response msg channels - respRand *rand.Rand // Used for generating suffix - - // Msg filters for testing. - // Protected by subsMu - filters map[string]msgFilter -} - -type natsReader struct { - r io.Reader - buf []byte - off int - n int -} - -type natsWriter struct { - w io.Writer - bufs []byte - limit int - pending *bytes.Buffer - plimit int -} - -// Subscription represents interest in a given subject. -type Subscription struct { - mu sync.Mutex - sid int64 - - // Subject that represents this subscription. This can be different - // than the received subject inside a Msg if this is a wildcard. - Subject string - - // Optional queue group name. 
If present, all subscriptions with the - // same name will form a distributed queue, and each message will - // only be processed by one member of the group. - Queue string - - // For holding information about a JetStream consumer. - jsi *jsSub - - delivered uint64 - max uint64 - conn *Conn - mcb MsgHandler - mch chan *Msg - closed bool - sc bool - connClosed bool - - // Type of Subscription - typ SubscriptionType - - // Async linked list - pHead *Msg - pTail *Msg - pCond *sync.Cond - pDone func(subject string) - - // Pending stats, async subscriptions, high-speed etc. - pMsgs int - pBytes int - pMsgsMax int - pBytesMax int - pMsgsLimit int - pBytesLimit int - dropped int -} - -// Msg represents a message delivered by NATS. This structure is used -// by Subscribers and PublishMsg(). -// -// # Types of Acknowledgements -// -// In case using JetStream, there are multiple ways to ack a Msg: -// -// // Acknowledgement that a message has been processed. -// msg.Ack() -// -// // Negatively acknowledges a message. -// msg.Nak() -// -// // Terminate a message so that it is not redelivered further. -// msg.Term() -// -// // Signal the server that the message is being worked on and reset redelivery timer. -// msg.InProgress() -type Msg struct { - Subject string - Reply string - Header Header - Data []byte - Sub *Subscription - // Internal - next *Msg - wsz int - barrier *barrierInfo - ackd uint32 -} - -// Compares two msgs, ignores sub but checks all other public fields. 
-func (m *Msg) Equal(msg *Msg) bool { - if m == msg { - return true - } - if m == nil || msg == nil { - return false - } - if m.Subject != msg.Subject || m.Reply != msg.Reply { - return false - } - if !bytes.Equal(m.Data, msg.Data) { - return false - } - if len(m.Header) != len(msg.Header) { - return false - } - for k, v := range m.Header { - val, ok := msg.Header[k] - if !ok || len(v) != len(val) { - return false - } - for i, hdr := range v { - if hdr != val[i] { - return false - } - } - } - return true -} - -// Size returns a message size in bytes. -func (m *Msg) Size() int { - if m.wsz != 0 { - return m.wsz - } - hdr, _ := m.headerBytes() - return len(m.Subject) + len(m.Reply) + len(hdr) + len(m.Data) -} - -func (m *Msg) headerBytes() ([]byte, error) { - var hdr []byte - if len(m.Header) == 0 { - return hdr, nil - } - - var b bytes.Buffer - _, err := b.WriteString(hdrLine) - if err != nil { - return nil, ErrBadHeaderMsg - } - - err = http.Header(m.Header).Write(&b) - if err != nil { - return nil, ErrBadHeaderMsg - } - - _, err = b.WriteString(crlf) - if err != nil { - return nil, ErrBadHeaderMsg - } - - return b.Bytes(), nil -} - -type barrierInfo struct { - refs int64 - f func() -} - -// Tracks various stats received and sent on this connection, -// including counts for messages and bytes. -type Statistics struct { - InMsgs uint64 - OutMsgs uint64 - InBytes uint64 - OutBytes uint64 - Reconnects uint64 -} - -// Tracks individual backend servers. -type srv struct { - url *url.URL - didConnect bool - reconnects int - lastErr error - isImplicit bool - tlsName string -} - -// The INFO block received from the server. 
-type serverInfo struct { - ID string `json:"server_id"` - Name string `json:"server_name"` - Proto int `json:"proto"` - Version string `json:"version"` - Host string `json:"host"` - Port int `json:"port"` - Headers bool `json:"headers"` - AuthRequired bool `json:"auth_required,omitempty"` - TLSRequired bool `json:"tls_required,omitempty"` - TLSAvailable bool `json:"tls_available,omitempty"` - MaxPayload int64 `json:"max_payload"` - CID uint64 `json:"client_id,omitempty"` - ClientIP string `json:"client_ip,omitempty"` - Nonce string `json:"nonce,omitempty"` - Cluster string `json:"cluster,omitempty"` - ConnectURLs []string `json:"connect_urls,omitempty"` - LameDuckMode bool `json:"ldm,omitempty"` -} - -const ( - // clientProtoZero is the original client protocol from 2009. - // http://nats.io/documentation/internals/nats-protocol/ - /* clientProtoZero */ _ = iota - // clientProtoInfo signals a client can receive more then the original INFO block. - // This can be used to update clients on other cluster members, etc. - clientProtoInfo -) - -type connectInfo struct { - Verbose bool `json:"verbose"` - Pedantic bool `json:"pedantic"` - UserJWT string `json:"jwt,omitempty"` - Nkey string `json:"nkey,omitempty"` - Signature string `json:"sig,omitempty"` - User string `json:"user,omitempty"` - Pass string `json:"pass,omitempty"` - Token string `json:"auth_token,omitempty"` - TLS bool `json:"tls_required"` - Name string `json:"name"` - Lang string `json:"lang"` - Version string `json:"version"` - Protocol int `json:"protocol"` - Echo bool `json:"echo"` - Headers bool `json:"headers"` - NoResponders bool `json:"no_responders"` -} - -// MsgHandler is a callback function that processes messages delivered to -// asynchronous subscribers. -type MsgHandler func(msg *Msg) - -// Connect will attempt to connect to the NATS system. -// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 -// Comma separated arrays are also supported, e.g. 
urlA, urlB. -// Options start with the defaults but can be overridden. -// To connect to a NATS Server's websocket port, use the `ws` or `wss` scheme, such as -// `ws://localhost:8080`. Note that websocket schemes cannot be mixed with others (nats/tls). -func Connect(url string, options ...Option) (*Conn, error) { - opts := GetDefaultOptions() - opts.Servers = processUrlString(url) - for _, opt := range options { - if opt != nil { - if err := opt(&opts); err != nil { - return nil, err - } - } - } - return opts.Connect() -} - -// Options that can be passed to Connect. - -// Name is an Option to set the client name. -func Name(name string) Option { - return func(o *Options) error { - o.Name = name - return nil - } -} - -// InProcessServer is an Option that will try to establish a direction to a NATS server -// running within the process instead of dialing via TCP. -func InProcessServer(server InProcessConnProvider) Option { - return func(o *Options) error { - o.InProcessServer = server - return nil - } -} - -// Secure is an Option to enable TLS secure connections that skip server verification by default. -// Pass a TLS Configuration for proper TLS. -// NOTE: This should NOT be used in a production setting. -func Secure(tls ...*tls.Config) Option { - return func(o *Options) error { - o.Secure = true - // Use of variadic just simplifies testing scenarios. We only take the first one. - if len(tls) > 1 { - return ErrMultipleTLSConfigs - } - if len(tls) == 1 { - o.TLSConfig = tls[0] - } - return nil - } -} - -// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. -// If Secure is not already set this will set it as well. 
-func RootCAs(file ...string) Option { - return func(o *Options) error { - rootCAsCB := func() (*x509.CertPool, error) { - pool := x509.NewCertPool() - for _, f := range file { - rootPEM, err := os.ReadFile(f) - if err != nil || rootPEM == nil { - return nil, fmt.Errorf("nats: error loading or parsing rootCA file: %w", err) - } - ok := pool.AppendCertsFromPEM(rootPEM) - if !ok { - return nil, fmt.Errorf("nats: failed to parse root certificate from %q", f) - } - } - return pool, nil - } - if o.TLSConfig == nil { - o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - if _, err := rootCAsCB(); err != nil { - return err - } - o.RootCAsCB = rootCAsCB - o.Secure = true - return nil - } -} - -// ClientCert is a helper option to provide the client certificate from a file. -// If Secure is not already set this will set it as well. -func ClientCert(certFile, keyFile string) Option { - return func(o *Options) error { - tlsCertCB := func() (tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return tls.Certificate{}, fmt.Errorf("nats: error loading client certificate: %w", err) - } - cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return tls.Certificate{}, fmt.Errorf("nats: error parsing client certificate: %w", err) - } - return cert, nil - } - if o.TLSConfig == nil { - o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - if _, err := tlsCertCB(); err != nil { - return err - } - o.TLSCertCB = tlsCertCB - o.Secure = true - return nil - } -} - -// NoReconnect is an Option to turn off reconnect behavior. -func NoReconnect() Option { - return func(o *Options) error { - o.AllowReconnect = false - return nil - } -} - -// DontRandomize is an Option to turn off randomizing the server pool. -func DontRandomize() Option { - return func(o *Options) error { - o.NoRandomize = true - return nil - } -} - -// NoEcho is an Option to turn off messages echoing back from a server. 
-// Note this is supported on servers >= version 1.2. Proto 1 or greater. -func NoEcho() Option { - return func(o *Options) error { - o.NoEcho = true - return nil - } -} - -// ReconnectWait is an Option to set the wait time between reconnect attempts. -// Defaults to 2s. -func ReconnectWait(t time.Duration) Option { - return func(o *Options) error { - o.ReconnectWait = t - return nil - } -} - -// MaxReconnects is an Option to set the maximum number of reconnect attempts. -// If negative, it will never stop trying to reconnect. -// Defaults to 60. -func MaxReconnects(max int) Option { - return func(o *Options) error { - o.MaxReconnect = max - return nil - } -} - -// ReconnectJitter is an Option to set the upper bound of a random delay added ReconnectWait. -// Defaults to 100ms and 1s, respectively. -func ReconnectJitter(jitter, jitterForTLS time.Duration) Option { - return func(o *Options) error { - o.ReconnectJitter = jitter - o.ReconnectJitterTLS = jitterForTLS - return nil - } -} - -// CustomReconnectDelay is an Option to set the CustomReconnectDelayCB option. -// See CustomReconnectDelayCB Option for more details. -func CustomReconnectDelay(cb ReconnectDelayHandler) Option { - return func(o *Options) error { - o.CustomReconnectDelayCB = cb - return nil - } -} - -// PingInterval is an Option to set the period for client ping commands. -// Defaults to 2m. -func PingInterval(t time.Duration) Option { - return func(o *Options) error { - o.PingInterval = t - return nil - } -} - -// MaxPingsOutstanding is an Option to set the maximum number of ping requests -// that can go unanswered by the server before closing the connection. -// Defaults to 2. -func MaxPingsOutstanding(max int) Option { - return func(o *Options) error { - o.MaxPingsOut = max - return nil - } -} - -// ReconnectBufSize sets the buffer size of messages kept while busy reconnecting. -// Defaults to 8388608 bytes (8MB). It can be disabled by setting it to -1. 
-func ReconnectBufSize(size int) Option { - return func(o *Options) error { - o.ReconnectBufSize = size - return nil - } -} - -// Timeout is an Option to set the timeout for Dial on a connection. -// Defaults to 2s. -func Timeout(t time.Duration) Option { - return func(o *Options) error { - o.Timeout = t - return nil - } -} - -// FlusherTimeout is an Option to set the write (and flush) timeout on a connection. -func FlusherTimeout(t time.Duration) Option { - return func(o *Options) error { - o.FlusherTimeout = t - return nil - } -} - -// DrainTimeout is an Option to set the timeout for draining a connection. -// Defaults to 30s. -func DrainTimeout(t time.Duration) Option { - return func(o *Options) error { - o.DrainTimeout = t - return nil - } -} - -// DisconnectErrHandler is an Option to set the disconnected error handler. -func DisconnectErrHandler(cb ConnErrHandler) Option { - return func(o *Options) error { - o.DisconnectedErrCB = cb - return nil - } -} - -// DisconnectHandler is an Option to set the disconnected handler. -// DEPRECATED: Use DisconnectErrHandler. -func DisconnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.DisconnectedCB = cb - return nil - } -} - -// ConnectHandler is an Option to set the connected handler. -func ConnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ConnectedCB = cb - return nil - } -} - -// ReconnectHandler is an Option to set the reconnected handler. -func ReconnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ReconnectedCB = cb - return nil - } -} - -// ClosedHandler is an Option to set the closed handler. -func ClosedHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ClosedCB = cb - return nil - } -} - -// DiscoveredServersHandler is an Option to set the new servers handler. 
-func DiscoveredServersHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.DiscoveredServersCB = cb - return nil - } -} - -// ErrorHandler is an Option to set the async error handler. -func ErrorHandler(cb ErrHandler) Option { - return func(o *Options) error { - o.AsyncErrorCB = cb - return nil - } -} - -// UserInfo is an Option to set the username and password to -// use when not included directly in the URLs. -func UserInfo(user, password string) Option { - return func(o *Options) error { - o.User = user - o.Password = password - return nil - } -} - -// Token is an Option to set the token to use -// when a token is not included directly in the URLs -// and when a token handler is not provided. -func Token(token string) Option { - return func(o *Options) error { - if o.TokenHandler != nil { - return ErrTokenAlreadySet - } - o.Token = token - return nil - } -} - -// TokenHandler is an Option to set the token handler to use -// when a token is not included directly in the URLs -// and when a token is not set. -func TokenHandler(cb AuthTokenHandler) Option { - return func(o *Options) error { - if o.Token != "" { - return ErrTokenAlreadySet - } - o.TokenHandler = cb - return nil - } -} - -// UserCredentials is a convenience function that takes a filename -// for a user's JWT and a filename for the user's private Nkey seed. -func UserCredentials(userOrChainedFile string, seedFiles ...string) Option { - userCB := func() (string, error) { - return userFromFile(userOrChainedFile) - } - var keyFile string - if len(seedFiles) > 0 { - keyFile = seedFiles[0] - } else { - keyFile = userOrChainedFile - } - sigCB := func(nonce []byte) ([]byte, error) { - return sigHandler(nonce, keyFile) - } - return UserJWT(userCB, sigCB) -} - -// UserJWTAndSeed is a convenience function that takes the JWT and seed -// values as strings. 
-func UserJWTAndSeed(jwt string, seed string) Option { - userCB := func() (string, error) { - return jwt, nil - } - - sigCB := func(nonce []byte) ([]byte, error) { - kp, err := nkeys.FromSeed([]byte(seed)) - if err != nil { - return nil, fmt.Errorf("unable to extract key pair from seed: %w", err) - } - // Wipe our key on exit. - defer kp.Wipe() - - sig, _ := kp.Sign(nonce) - return sig, nil - } - - return UserJWT(userCB, sigCB) -} - -// UserJWT will set the callbacks to retrieve the user's JWT and -// the signature callback to sign the server nonce. This an the Nkey -// option are mutually exclusive. -func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option { - return func(o *Options) error { - if userCB == nil { - return ErrNoUserCB - } - if sigCB == nil { - return ErrUserButNoSigCB - } - // Smoke test the user callback to ensure it is setup properly - // when processing options. - if _, err := userCB(); err != nil { - return err - } - - o.UserJWT = userCB - o.SignatureCB = sigCB - return nil - } -} - -// Nkey will set the public Nkey and the signature callback to -// sign the server nonce. -func Nkey(pubKey string, sigCB SignatureHandler) Option { - return func(o *Options) error { - o.Nkey = pubKey - o.SignatureCB = sigCB - if pubKey != "" && sigCB == nil { - return ErrNkeyButNoSigCB - } - return nil - } -} - -// SyncQueueLen will set the maximum queue len for the internal -// channel used for SubscribeSync(). -// Defaults to 65536. -func SyncQueueLen(max int) Option { - return func(o *Options) error { - o.SubChanLen = max - return nil - } -} - -// Dialer is an Option to set the dialer which will be used when -// attempting to establish a connection. -// DEPRECATED: Should use CustomDialer instead. -func Dialer(dialer *net.Dialer) Option { - return func(o *Options) error { - o.Dialer = dialer - return nil - } -} - -// SetCustomDialer is an Option to set a custom dialer which will be -// used when attempting to establish a connection. 
If both Dialer -// and CustomDialer are specified, CustomDialer takes precedence. -func SetCustomDialer(dialer CustomDialer) Option { - return func(o *Options) error { - o.CustomDialer = dialer - return nil - } -} - -// UseOldRequestStyle is an Option to force usage of the old Request style. -func UseOldRequestStyle() Option { - return func(o *Options) error { - o.UseOldRequestStyle = true - return nil - } -} - -// NoCallbacksAfterClientClose is an Option to disable callbacks when user code -// calls Close(). If close is initiated by any other condition, callbacks -// if any will be invoked. -func NoCallbacksAfterClientClose() Option { - return func(o *Options) error { - o.NoCallbacksAfterClientClose = true - return nil - } -} - -// LameDuckModeHandler sets the callback to invoke when the server notifies -// the connection that it entered lame duck mode, that is, going to -// gradually disconnect all its connections before shutting down. This is -// often used in deployments when upgrading NATS Servers. -func LameDuckModeHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.LameDuckModeHandler = cb - return nil - } -} - -// RetryOnFailedConnect sets the connection in reconnecting state right away -// if it can't connect to a server in the initial set. -// See RetryOnFailedConnect option for more details. -func RetryOnFailedConnect(retry bool) Option { - return func(o *Options) error { - o.RetryOnFailedConnect = retry - return nil - } -} - -// Compression is an Option to indicate if this connection supports -// compression. Currently only supported for Websocket connections. -func Compression(enabled bool) Option { - return func(o *Options) error { - o.Compression = enabled - return nil - } -} - -// ProxyPath is an option for websocket connections that adds a path to connections url. -// This is useful when connecting to NATS behind a proxy. 
-func ProxyPath(path string) Option { - return func(o *Options) error { - o.ProxyPath = path - return nil - } -} - -// CustomInboxPrefix configures the request + reply inbox prefix -func CustomInboxPrefix(p string) Option { - return func(o *Options) error { - if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") { - return fmt.Errorf("nats: invalid custom prefix") - } - o.InboxPrefix = p - return nil - } -} - -// IgnoreAuthErrorAbort opts out of the default connect behavior of aborting -// subsequent reconnect attempts if server returns the same auth error twice. -func IgnoreAuthErrorAbort() Option { - return func(o *Options) error { - o.IgnoreAuthErrorAbort = true - return nil - } -} - -// SkipHostLookup is an Option to skip the host lookup when connecting to a server. -func SkipHostLookup() Option { - return func(o *Options) error { - o.SkipHostLookup = true - return nil - } -} - -// Handler processing - -// SetDisconnectHandler will set the disconnect event handler. -// DEPRECATED: Use SetDisconnectErrHandler -func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DisconnectedCB = dcb -} - -// SetDisconnectErrHandler will set the disconnect event handler. -func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DisconnectedErrCB = dcb -} - -// DisconnectErrHandler will return the disconnect event handler. -func (nc *Conn) DisconnectErrHandler() ConnErrHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.DisconnectedErrCB -} - -// SetReconnectHandler will set the reconnect event handler. -func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.ReconnectedCB = rcb -} - -// ReconnectHandler will return the reconnect event handler. 
-func (nc *Conn) ReconnectHandler() ConnHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.ReconnectedCB -} - -// SetDiscoveredServersHandler will set the discovered servers handler. -func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DiscoveredServersCB = dscb -} - -// DiscoveredServersHandler will return the discovered servers handler. -func (nc *Conn) DiscoveredServersHandler() ConnHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.DiscoveredServersCB -} - -// SetClosedHandler will set the closed event handler. -func (nc *Conn) SetClosedHandler(cb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.ClosedCB = cb -} - -// ClosedHandler will return the closed event handler. -func (nc *Conn) ClosedHandler() ConnHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.ClosedCB -} - -// SetErrorHandler will set the async error handler. -func (nc *Conn) SetErrorHandler(cb ErrHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.AsyncErrorCB = cb -} - -// ErrorHandler will return the async error handler. -func (nc *Conn) ErrorHandler() ErrHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.AsyncErrorCB -} - -// Process the url string argument to Connect. -// Return an array of urls, even if only one. -func processUrlString(url string) []string { - urls := strings.Split(url, ",") - var j int - for _, s := range urls { - u := strings.TrimSpace(s) - if len(u) > 0 { - urls[j] = u - j++ - } - } - return urls[:j] -} - -// Connect will attempt to connect to a NATS server with multiple options. -func (o Options) Connect() (*Conn, error) { - nc := &Conn{Opts: o} - - // Some default options processing. 
- if nc.Opts.MaxPingsOut == 0 { - nc.Opts.MaxPingsOut = DefaultMaxPingOut - } - // Allow old default for channel length to work correctly. - if nc.Opts.SubChanLen == 0 { - nc.Opts.SubChanLen = DefaultMaxChanLen - } - // Default ReconnectBufSize - if nc.Opts.ReconnectBufSize == 0 { - nc.Opts.ReconnectBufSize = DefaultReconnectBufSize - } - // Ensure that Timeout is not 0 - if nc.Opts.Timeout == 0 { - nc.Opts.Timeout = DefaultTimeout - } - - // Check first for user jwt callback being defined and nkey. - if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" { - return nil, ErrNkeyAndUser - } - - // Check if we have an nkey but no signature callback defined. - if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil { - return nil, ErrNkeyButNoSigCB - } - - // Allow custom Dialer for connecting using a timeout by default - if nc.Opts.Dialer == nil { - nc.Opts.Dialer = &net.Dialer{ - Timeout: nc.Opts.Timeout, - } - } - - if err := nc.setupServerPool(); err != nil { - return nil, err - } - - // Create the async callback handler. - nc.ach = &asyncCallbacksHandler{} - nc.ach.cond = sync.NewCond(&nc.ach.mu) - - // Set a default error handler that will print to stderr. 
- if nc.Opts.AsyncErrorCB == nil { - nc.Opts.AsyncErrorCB = defaultErrHandler - } - - // Create reader/writer - nc.newReaderWriter() - - connectionEstablished, err := nc.connect() - if err != nil { - return nil, err - } - - // Spin up the async cb dispatcher on success - go nc.ach.asyncCBDispatcher() - - if connectionEstablished && nc.Opts.ConnectedCB != nil { - nc.ach.push(func() { nc.Opts.ConnectedCB(nc) }) - } - - return nc, nil -} - -func defaultErrHandler(nc *Conn, sub *Subscription, err error) { - var cid uint64 - if nc != nil { - nc.mu.RLock() - cid = nc.info.CID - nc.mu.RUnlock() - } - var errStr string - if sub != nil { - var subject string - sub.mu.Lock() - if sub.jsi != nil { - subject = sub.jsi.psubj - } else { - subject = sub.Subject - } - sub.mu.Unlock() - errStr = fmt.Sprintf("%s on connection [%d] for subscription on %q\n", err.Error(), cid, subject) - } else { - errStr = fmt.Sprintf("%s on connection [%d]\n", err.Error(), cid) - } - os.Stderr.WriteString(errStr) -} - -const ( - _CRLF_ = "\r\n" - _EMPTY_ = "" - _SPC_ = " " - _PUB_P_ = "PUB " - _HPUB_P_ = "HPUB " -) - -var _CRLF_BYTES_ = []byte(_CRLF_) - -const ( - _OK_OP_ = "+OK" - _ERR_OP_ = "-ERR" - _PONG_OP_ = "PONG" - _INFO_OP_ = "INFO" -) - -const ( - connectProto = "CONNECT %s" + _CRLF_ - pingProto = "PING" + _CRLF_ - pongProto = "PONG" + _CRLF_ - subProto = "SUB %s %s %d" + _CRLF_ - unsubProto = "UNSUB %d %s" + _CRLF_ - okProto = _OK_OP_ + _CRLF_ -) - -// Return the currently selected server -func (nc *Conn) currentServer() (int, *srv) { - for i, s := range nc.srvPool { - if s == nil { - continue - } - if s == nc.current { - return i, s - } - } - return -1, nil -} - -// Pop the current server and put onto the end of the list. Select head of list as long -// as number of reconnect attempts under MaxReconnect. 
-func (nc *Conn) selectNextServer() (*srv, error) { - i, s := nc.currentServer() - if i < 0 { - return nil, ErrNoServers - } - sp := nc.srvPool - num := len(sp) - copy(sp[i:num-1], sp[i+1:num]) - maxReconnect := nc.Opts.MaxReconnect - if maxReconnect < 0 || s.reconnects < maxReconnect { - nc.srvPool[num-1] = s - } else { - nc.srvPool = sp[0 : num-1] - } - if len(nc.srvPool) <= 0 { - nc.current = nil - return nil, ErrNoServers - } - nc.current = nc.srvPool[0] - return nc.srvPool[0], nil -} - -// Will assign the correct server to nc.current -func (nc *Conn) pickServer() error { - nc.current = nil - if len(nc.srvPool) <= 0 { - return ErrNoServers - } - - for _, s := range nc.srvPool { - if s != nil { - nc.current = s - return nil - } - } - return ErrNoServers -} - -const tlsScheme = "tls" - -// Create the server pool using the options given. -// We will place a Url option first, followed by any -// Server Options. We will randomize the server pool unless -// the NoRandomize flag is set. -func (nc *Conn) setupServerPool() error { - nc.srvPool = make([]*srv, 0, srvPoolSize) - nc.urls = make(map[string]struct{}, srvPoolSize) - - // Create srv objects from each url string in nc.Opts.Servers - // and add them to the pool. - for _, urlString := range nc.Opts.Servers { - if err := nc.addURLToPool(urlString, false, false); err != nil { - return err - } - } - - // Randomize if allowed to - if !nc.Opts.NoRandomize { - nc.shufflePool(0) - } - - // Normally, if this one is set, Options.Servers should not be, - // but we always allowed that, so continue to do so. - if nc.Opts.Url != _EMPTY_ { - // Add to the end of the array - if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil { - return err - } - // Then swap it with first to guarantee that Options.Url is tried first. - last := len(nc.srvPool) - 1 - if last > 0 { - nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] - } - } else if len(nc.srvPool) <= 0 { - // Place default URL if pool is empty. 
- if err := nc.addURLToPool(DefaultURL, false, false); err != nil { - return err - } - } - - // Check for Scheme hint to move to TLS mode. - for _, srv := range nc.srvPool { - if srv.url.Scheme == tlsScheme || srv.url.Scheme == wsSchemeTLS { - // FIXME(dlc), this is for all in the pool, should be case by case. - nc.Opts.Secure = true - if nc.Opts.TLSConfig == nil { - nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - } - } - - return nc.pickServer() -} - -// Helper function to return scheme -func (nc *Conn) connScheme() string { - if nc.ws { - if nc.Opts.Secure { - return wsSchemeTLS - } - return wsScheme - } - if nc.Opts.Secure { - return tlsScheme - } - return "nats" -} - -// Return true iff u.Hostname() is an IP address. -func hostIsIP(u *url.URL) bool { - return net.ParseIP(u.Hostname()) != nil -} - -// addURLToPool adds an entry to the server pool -func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error { - if !strings.Contains(sURL, "://") { - sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL) - } - var ( - u *url.URL - err error - ) - for i := 0; i < 2; i++ { - u, err = url.Parse(sURL) - if err != nil { - return err - } - if u.Port() != "" { - break - } - // In case given URL is of the form "localhost:", just add - // the port number at the end, otherwise, add ":4222". - if sURL[len(sURL)-1] != ':' { - sURL += ":" - } - switch u.Scheme { - case wsScheme: - sURL += defaultWSPortString - case wsSchemeTLS: - sURL += defaultWSSPortString - default: - sURL += defaultPortString - } - } - - isWS := isWebsocketScheme(u) - // We don't support mix and match of websocket and non websocket URLs. - // If this is the first URL, then we accept and switch the global state - // to websocket. After that, we will know how to reject mixed URLs. 
- if len(nc.srvPool) == 0 { - nc.ws = isWS - } else if isWS && !nc.ws || !isWS && nc.ws { - return fmt.Errorf("mixing of websocket and non websocket URLs is not allowed") - } - - var tlsName string - if implicit { - curl := nc.current.url - // Check to see if we do not have a url.User but current connected - // url does. If so copy over. - if u.User == nil && curl.User != nil { - u.User = curl.User - } - // We are checking to see if we have a secure connection and are - // adding an implicit server that just has an IP. If so we will remember - // the current hostname we are connected to. - if saveTLSName && hostIsIP(u) { - tlsName = curl.Hostname() - } - } - - s := &srv{url: u, isImplicit: implicit, tlsName: tlsName} - nc.srvPool = append(nc.srvPool, s) - nc.urls[u.Host] = struct{}{} - return nil -} - -// shufflePool swaps randomly elements in the server pool -// The `offset` value indicates that the shuffling should start at -// this offset and leave the elements from [0..offset) intact. 
-func (nc *Conn) shufflePool(offset int) { - if len(nc.srvPool) <= offset+1 { - return - } - source := rand.NewSource(time.Now().UnixNano()) - r := rand.New(source) - for i := offset; i < len(nc.srvPool); i++ { - j := offset + r.Intn(i+1-offset) - nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] - } -} - -func (nc *Conn) newReaderWriter() { - nc.br = &natsReader{ - buf: make([]byte, defaultBufSize), - off: -1, - } - nc.bw = &natsWriter{ - limit: defaultBufSize, - plimit: nc.Opts.ReconnectBufSize, - } -} - -func (nc *Conn) bindToNewConn() { - bw := nc.bw - bw.w, bw.bufs = nc.newWriter(), nil - br := nc.br - br.r, br.n, br.off = nc.conn, 0, -1 -} - -func (nc *Conn) newWriter() io.Writer { - var w io.Writer = nc.conn - if nc.Opts.FlusherTimeout > 0 { - w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout} - } - return w -} - -func (w *natsWriter) appendString(str string) error { - return w.appendBufs([]byte(str)) -} - -func (w *natsWriter) appendBufs(bufs ...[]byte) error { - for _, buf := range bufs { - if len(buf) == 0 { - continue - } - if w.pending != nil { - w.pending.Write(buf) - } else { - w.bufs = append(w.bufs, buf...) - } - } - if w.pending == nil && len(w.bufs) >= w.limit { - return w.flush() - } - return nil -} - -func (w *natsWriter) writeDirect(strs ...string) error { - for _, str := range strs { - if _, err := w.w.Write([]byte(str)); err != nil { - return err - } - } - return nil -} - -func (w *natsWriter) flush() error { - // If a pending buffer is set, we don't flush. Code that needs to - // write directly to the socket, by-passing buffers during (re)connect, - // will use the writeDirect() API. - if w.pending != nil { - return nil - } - // Do not skip calling w.w.Write() here if len(w.bufs) is 0 because - // the actual writer (if websocket for instance) may have things - // to do such as sending control frames, etc.. 
- _, err := w.w.Write(w.bufs) - w.bufs = w.bufs[:0] - return err -} - -func (w *natsWriter) buffered() int { - if w.pending != nil { - return w.pending.Len() - } - return len(w.bufs) -} - -func (w *natsWriter) switchToPending() { - w.pending = new(bytes.Buffer) -} - -func (w *natsWriter) flushPendingBuffer() error { - if w.pending == nil || w.pending.Len() == 0 { - return nil - } - _, err := w.w.Write(w.pending.Bytes()) - // Reset the pending buffer at this point because we don't want - // to take the risk of sending duplicates or partials. - w.pending.Reset() - return err -} - -func (w *natsWriter) atLimitIfUsingPending() bool { - if w.pending == nil { - return false - } - return w.pending.Len() >= w.plimit -} - -func (w *natsWriter) doneWithPending() { - w.pending = nil -} - -// Notify the reader that we are done with the connect, where "read" operations -// happen synchronously and under the connection lock. After this point, "read" -// will be happening from the read loop, without the connection lock. -// -// Note: this runs under the connection lock. -func (r *natsReader) doneWithConnect() { - if wsr, ok := r.r.(*websocketReader); ok { - wsr.doneWithConnect() - } -} - -func (r *natsReader) Read() ([]byte, error) { - if r.off >= 0 { - off := r.off - r.off = -1 - return r.buf[off:r.n], nil - } - var err error - r.n, err = r.r.Read(r.buf) - return r.buf[:r.n], err -} - -func (r *natsReader) ReadString(delim byte) (string, error) { - var s string -build_string: - // First look if we have something in the buffer - if r.off >= 0 { - i := bytes.IndexByte(r.buf[r.off:r.n], delim) - if i >= 0 { - end := r.off + i + 1 - s += string(r.buf[r.off:end]) - r.off = end - if r.off >= r.n { - r.off = -1 - } - return s, nil - } - // We did not find the delim, so will have to read more. 
- s += string(r.buf[r.off:r.n]) - r.off = -1 - } - if _, err := r.Read(); err != nil { - return s, err - } - r.off = 0 - goto build_string -} - -// createConn will connect to the server and wrap the appropriate -// bufio structures. It will do the right thing when an existing -// connection is in place. -func (nc *Conn) createConn() (err error) { - if nc.Opts.Timeout < 0 { - return ErrBadTimeout - } - if _, cur := nc.currentServer(); cur == nil { - return ErrNoServers - } - - // If we have a reference to an in-process server then establish a - // connection using that. - if nc.Opts.InProcessServer != nil { - conn, err := nc.Opts.InProcessServer.InProcessConn() - if err != nil { - return fmt.Errorf("failed to get in-process connection: %w", err) - } - nc.conn = conn - nc.bindToNewConn() - return nil - } - - // We will auto-expand host names if they resolve to multiple IPs - hosts := []string{} - u := nc.current.url - - if !nc.Opts.SkipHostLookup && net.ParseIP(u.Hostname()) == nil { - addrs, _ := net.LookupHost(u.Hostname()) - for _, addr := range addrs { - hosts = append(hosts, net.JoinHostPort(addr, u.Port())) - } - } - // Fall back to what we were given. - if len(hosts) == 0 { - hosts = append(hosts, u.Host) - } - - // CustomDialer takes precedence. If not set, use Opts.Dialer which - // is set to a default *net.Dialer (in Connect()) if not explicitly - // set by the user. - dialer := nc.Opts.CustomDialer - if dialer == nil { - // We will copy and shorten the timeout if we have multiple hosts to try. 
- copyDialer := *nc.Opts.Dialer - copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts)) - dialer = ©Dialer - } - - if len(hosts) > 1 && !nc.Opts.NoRandomize { - rand.Shuffle(len(hosts), func(i, j int) { - hosts[i], hosts[j] = hosts[j], hosts[i] - }) - } - for _, host := range hosts { - nc.conn, err = dialer.Dial("tcp", host) - if err == nil { - break - } - } - if err != nil { - return err - } - - // If scheme starts with "ws" then branch out to websocket code. - if isWebsocketScheme(u) { - return nc.wsInitHandshake(u) - } - - // Reset reader/writer to this new TCP connection - nc.bindToNewConn() - return nil -} - -type skipTLSDialer interface { - SkipTLSHandshake() bool -} - -// makeTLSConn will wrap an existing Conn using TLS -func (nc *Conn) makeTLSConn() error { - if nc.Opts.CustomDialer != nil { - // we do nothing when asked to skip the TLS wrapper - sd, ok := nc.Opts.CustomDialer.(skipTLSDialer) - if ok && sd.SkipTLSHandshake() { - return nil - } - } - // Allow the user to configure their own tls.Config structure. 
- tlsCopy := &tls.Config{} - if nc.Opts.TLSConfig != nil { - tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig) - } - if nc.Opts.TLSCertCB != nil { - cert, err := nc.Opts.TLSCertCB() - if err != nil { - return err - } - tlsCopy.Certificates = []tls.Certificate{cert} - } - if nc.Opts.RootCAsCB != nil { - rootCAs, err := nc.Opts.RootCAsCB() - if err != nil { - return err - } - tlsCopy.RootCAs = rootCAs - } - // If its blank we will override it with the current host - if tlsCopy.ServerName == _EMPTY_ { - if nc.current.tlsName != _EMPTY_ { - tlsCopy.ServerName = nc.current.tlsName - } else { - h, _, _ := net.SplitHostPort(nc.current.url.Host) - tlsCopy.ServerName = h - } - } - nc.conn = tls.Client(nc.conn, tlsCopy) - conn := nc.conn.(*tls.Conn) - if err := conn.Handshake(); err != nil { - return err - } - nc.bindToNewConn() - return nil -} - -// TLSConnectionState retrieves the state of the TLS connection to the server -func (nc *Conn) TLSConnectionState() (tls.ConnectionState, error) { - if !nc.isConnected() { - return tls.ConnectionState{}, ErrDisconnected - } - - nc.mu.RLock() - conn := nc.conn - nc.mu.RUnlock() - - tc, ok := conn.(*tls.Conn) - if !ok { - return tls.ConnectionState{}, ErrConnectionNotTLS - } - - return tc.ConnectionState(), nil -} - -// waitForExits will wait for all socket watcher Go routines to -// be shutdown before proceeding. -func (nc *Conn) waitForExits() { - // Kick old flusher forcefully. - select { - case nc.fch <- struct{}{}: - default: - } - - // Wait for any previous go routines. 
- nc.wg.Wait() -} - -// ConnectedUrl reports the connected server's URL -func (nc *Conn) ConnectedUrl() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.current.url.String() -} - -// ConnectedUrlRedacted reports the connected server's URL with passwords redacted -func (nc *Conn) ConnectedUrlRedacted() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.current.url.Redacted() -} - -// ConnectedAddr returns the connected server's IP -func (nc *Conn) ConnectedAddr() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.conn.RemoteAddr().String() -} - -// ConnectedServerId reports the connected server's Id -func (nc *Conn) ConnectedServerId() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.info.ID -} - -// ConnectedServerName reports the connected server's name -func (nc *Conn) ConnectedServerName() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.info.Name -} - -var semVerRe = regexp.MustCompile(`\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?`) - -func versionComponents(version string) (major, minor, patch int, err error) { - m := semVerRe.FindStringSubmatch(version) - if m == nil { - return 0, 0, 0, errors.New("invalid semver") - } - major, err = strconv.Atoi(m[1]) - if err != nil { - return -1, -1, -1, err - } - minor, err = strconv.Atoi(m[2]) - if err != nil { - return -1, -1, -1, err - } - patch, err = strconv.Atoi(m[3]) - if err != nil { - return -1, -1, -1, err - } - return major, minor, patch, err -} - -// Check for minimum server requirement. 
-func (nc *Conn) serverMinVersion(major, minor, patch int) bool { - smajor, sminor, spatch, _ := versionComponents(nc.ConnectedServerVersion()) - if smajor < major || (smajor == major && sminor < minor) || (smajor == major && sminor == minor && spatch < patch) { - return false - } - return true -} - -// ConnectedServerVersion reports the connected server's version as a string -func (nc *Conn) ConnectedServerVersion() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.info.Version -} - -// ConnectedClusterName reports the connected server's cluster name if any -func (nc *Conn) ConnectedClusterName() string { - if nc == nil { - return _EMPTY_ - } - - nc.mu.RLock() - defer nc.mu.RUnlock() - - if nc.status != CONNECTED { - return _EMPTY_ - } - return nc.info.Cluster -} - -// Low level setup for structs, etc -func (nc *Conn) setup() { - nc.subs = make(map[int64]*Subscription) - nc.pongs = make([]chan struct{}, 0, 8) - - nc.fch = make(chan struct{}, flushChanSize) - nc.rqch = make(chan struct{}) - - // Setup scratch outbound buffer for PUB/HPUB - pub := nc.scratch[:len(_HPUB_P_)] - copy(pub, _HPUB_P_) -} - -// Process a connected connection and initialize properly. -func (nc *Conn) processConnectInit() error { - - // Set our deadline for the whole connect process - nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) - defer nc.conn.SetDeadline(time.Time{}) - - // Set our status to connecting. - nc.changeConnStatus(CONNECTING) - - // Process the INFO protocol received from the server - err := nc.processExpectedInfo() - if err != nil { - return err - } - - // Send the CONNECT protocol along with the initial PING protocol. - // Wait for the PONG response (or any error that we get from the server). 
- err = nc.sendConnect() - if err != nil { - return err - } - - // Reset the number of PING sent out - nc.pout = 0 - - // Start or reset Timer - if nc.Opts.PingInterval > 0 { - if nc.ptmr == nil { - nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) - } else { - nc.ptmr.Reset(nc.Opts.PingInterval) - } - } - - // Start the readLoop and flusher go routines, we will wait on both on a reconnect event. - nc.wg.Add(2) - go nc.readLoop() - go nc.flusher() - - // Notify the reader that we are done with the connect handshake, where - // reads were done synchronously and under the connection lock. - nc.br.doneWithConnect() - - return nil -} - -// Main connect function. Will connect to the nats-server. -func (nc *Conn) connect() (bool, error) { - var err error - var connectionEstablished bool - - // Create actual socket connection - // For first connect we walk all servers in the pool and try - // to connect immediately. - nc.mu.Lock() - defer nc.mu.Unlock() - nc.initc = true - // The pool may change inside the loop iteration due to INFO protocol. - for i := 0; i < len(nc.srvPool); i++ { - nc.current = nc.srvPool[i] - - if err = nc.createConn(); err == nil { - // This was moved out of processConnectInit() because - // that function is now invoked from doReconnect() too. - nc.setup() - - err = nc.processConnectInit() - - if err == nil { - nc.current.didConnect = true - nc.current.reconnects = 0 - nc.current.lastErr = nil - break - } else { - nc.mu.Unlock() - nc.close(DISCONNECTED, false, err) - nc.mu.Lock() - // Do not reset nc.current here since it would prevent - // RetryOnFailedConnect to work should this be the last server - // to try before starting doReconnect(). 
- } - } else { - // Cancel out default connection refused, will trigger the - // No servers error conditional - if strings.Contains(err.Error(), "connection refused") { - err = nil - } - } - } - - if err == nil && nc.status != CONNECTED { - err = ErrNoServers - } - - if err == nil { - connectionEstablished = true - nc.initc = false - } else if nc.Opts.RetryOnFailedConnect { - nc.setup() - nc.changeConnStatus(RECONNECTING) - nc.bw.switchToPending() - go nc.doReconnect(ErrNoServers) - err = nil - } else { - nc.current = nil - } - - return connectionEstablished, err -} - -// This will check to see if the connection should be -// secure. This can be dictated from either end and should -// only be called after the INIT protocol has been received. -func (nc *Conn) checkForSecure() error { - // Check to see if we need to engage TLS - o := nc.Opts - - // Check for mismatch in setups - if o.Secure && !nc.info.TLSRequired && !nc.info.TLSAvailable { - return ErrSecureConnWanted - } else if nc.info.TLSRequired && !o.Secure { - // Switch to Secure since server needs TLS. - o.Secure = true - } - - // Need to rewrap with bufio - if o.Secure { - if err := nc.makeTLSConn(); err != nil { - return err - } - } - return nil -} - -// processExpectedInfo will look for the expected first INFO message -// sent when a connection is established. The lock should be held entering. -func (nc *Conn) processExpectedInfo() error { - - c := &control{} - - // Read the protocol - err := nc.readOp(c) - if err != nil { - return err - } - - // The nats protocol should send INFO first always. - if c.op != _INFO_OP_ { - return ErrNoInfoReceived - } - - // Parse the protocol - if err := nc.processInfo(c.args); err != nil { - return err - } - - if nc.Opts.Nkey != "" && nc.info.Nonce == "" { - return ErrNkeysNotSupported - } - - // For websocket connections, we already switched to TLS if need be, - // so we are done here. 
- if nc.ws { - return nil - } - - return nc.checkForSecure() -} - -// Sends a protocol control message by queuing into the bufio writer -// and kicking the flush Go routine. These writes are protected. -func (nc *Conn) sendProto(proto string) { - nc.mu.Lock() - nc.bw.appendString(proto) - nc.kickFlusher() - nc.mu.Unlock() -} - -// Generate a connect protocol message, issuing user/password if -// applicable. The lock is assumed to be held upon entering. -func (nc *Conn) connectProto() (string, error) { - o := nc.Opts - var nkey, sig, user, pass, token, ujwt string - u := nc.current.url.User - if u != nil { - // if no password, assume username is authToken - if _, ok := u.Password(); !ok { - token = u.Username() - } else { - user = u.Username() - pass, _ = u.Password() - } - } else { - // Take from options (possibly all empty strings) - user = o.User - pass = o.Password - token = o.Token - nkey = o.Nkey - } - - // Look for user jwt. - if o.UserJWT != nil { - if jwt, err := o.UserJWT(); err != nil { - return _EMPTY_, err - } else { - ujwt = jwt - } - if nkey != _EMPTY_ { - return _EMPTY_, ErrNkeyAndUser - } - } - - if ujwt != _EMPTY_ || nkey != _EMPTY_ { - if o.SignatureCB == nil { - if ujwt == _EMPTY_ { - return _EMPTY_, ErrNkeyButNoSigCB - } - return _EMPTY_, ErrUserButNoSigCB - } - sigraw, err := o.SignatureCB([]byte(nc.info.Nonce)) - if err != nil { - return _EMPTY_, fmt.Errorf("error signing nonce: %w", err) - } - sig = base64.RawURLEncoding.EncodeToString(sigraw) - } - - if nc.Opts.TokenHandler != nil { - if token != _EMPTY_ { - return _EMPTY_, ErrTokenAlreadySet - } - token = nc.Opts.TokenHandler() - } - - // If our server does not support headers then we can't do them or no responders. 
- hdrs := nc.info.Headers - cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token, - o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs} - - b, err := json.Marshal(cinfo) - if err != nil { - return _EMPTY_, ErrJsonParse - } - - // Check if NoEcho is set and we have a server that supports it. - if o.NoEcho && nc.info.Proto < 1 { - return _EMPTY_, ErrNoEchoNotSupported - } - - return fmt.Sprintf(connectProto, b), nil -} - -// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. -func normalizeErr(line string) string { - s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_)) - s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") - return s -} - -// natsProtoErr represents an -ERR protocol message sent by the server. -type natsProtoErr struct { - description string -} - -func (nerr *natsProtoErr) Error() string { - return fmt.Sprintf("nats: %s", nerr.description) -} - -func (nerr *natsProtoErr) Is(err error) bool { - return strings.ToLower(nerr.Error()) == err.Error() -} - -// Send a connect protocol message to the server, issue user/password if -// applicable. Will wait for a flush to return from the server for error -// processing. -func (nc *Conn) sendConnect() error { - // Construct the CONNECT protocol string - cProto, err := nc.connectProto() - if err != nil { - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - return err - } - - // Write the protocol and PING directly to the underlying writer. - if err := nc.bw.writeDirect(cProto, pingProto); err != nil { - return err - } - - // We don't want to read more than we need here, otherwise - // we would need to transfer the excess read data to the readLoop. - // Since in normal situations we just are looking for a PONG\r\n, - // reading byte-by-byte here is ok. 
- proto, err := nc.readProto() - if err != nil { - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - return err - } - - // If opts.Verbose is set, handle +OK - if nc.Opts.Verbose && proto == okProto { - // Read the rest now... - proto, err = nc.readProto() - if err != nil { - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - return err - } - } - - // We expect a PONG - if proto != pongProto { - // But it could be something else, like -ERR - - // Since we no longer use ReadLine(), trim the trailing "\r\n" - proto = strings.TrimRight(proto, "\r\n") - - // If it's a server error... - if strings.HasPrefix(proto, _ERR_OP_) { - // Remove -ERR, trim spaces and quotes, and convert to lower case. - proto = normalizeErr(proto) - - // Check if this is an auth error - if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil { - // This will schedule an async error if we are in reconnect, - // and keep track of the auth error for the current server. - // If we have got the same error twice, this sets nc.ar to true to - // indicate that the reconnect should be aborted (will be checked - // in doReconnect()). - nc.processAuthError(authErr) - } - return &natsProtoErr{proto} - } - - // Notify that we got an unexpected protocol. - return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto) - } - - // This is where we are truly connected. - nc.changeConnStatus(CONNECTED) - - return nil -} - -// reads a protocol line. -func (nc *Conn) readProto() (string, error) { - return nc.br.ReadString('\n') -} - -// A control protocol line. -type control struct { - op, args string -} - -// Read a control line and process the intended op. -func (nc *Conn) readOp(c *control) error { - line, err := nc.readProto() - if err != nil { - return err - } - parseControl(line, c) - return nil -} - -// Parse a control line from the server. 
-func parseControl(line string, c *control) { - toks := strings.SplitN(line, _SPC_, 2) - if len(toks) == 1 { - c.op = strings.TrimSpace(toks[0]) - c.args = _EMPTY_ - } else if len(toks) == 2 { - c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) - } else { - c.op = _EMPTY_ - } -} - -// flushReconnectPendingItems will push the pending items that were -// gathered while we were in a RECONNECTING state to the socket. -func (nc *Conn) flushReconnectPendingItems() error { - return nc.bw.flushPendingBuffer() -} - -// Stops the ping timer if set. -// Connection lock is held on entry. -func (nc *Conn) stopPingTimer() { - if nc.ptmr != nil { - nc.ptmr.Stop() - } -} - -// Try to reconnect using the option parameters. -// This function assumes we are allowed to reconnect. -func (nc *Conn) doReconnect(err error) { - // We want to make sure we have the other watchers shutdown properly - // here before we proceed past this point. - nc.waitForExits() - - // FIXME(dlc) - We have an issue here if we have - // outstanding flush points (pongs) and they were not - // sent out, but are still in the pipe. - - // Hold the lock manually and release where needed below, - // can't do defer here. - nc.mu.Lock() - - // Clear any errors. - nc.err = nil - // Perform appropriate callback if needed for a disconnect. - // DisconnectedErrCB has priority over deprecated DisconnectedCB - if !nc.initc { - if nc.Opts.DisconnectedErrCB != nil { - nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) - } else if nc.Opts.DisconnectedCB != nil { - nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) - } - } - - // This is used to wait on go routines exit if we start them in the loop - // but an error occurs after that. - waitForGoRoutines := false - var rt *time.Timer - // Channel used to kick routine out of sleep when conn is closed. - rqch := nc.rqch - // Counter that is increased when the whole list of servers has been tried. 
- var wlf int - - var jitter time.Duration - var rw time.Duration - // If a custom reconnect delay handler is set, this takes precedence. - crd := nc.Opts.CustomReconnectDelayCB - if crd == nil { - rw = nc.Opts.ReconnectWait - // TODO: since we sleep only after the whole list has been tried, we can't - // rely on individual *srv to know if it is a TLS or non-TLS url. - // We have to pick which type of jitter to use, for now, we use these hints: - jitter = nc.Opts.ReconnectJitter - if nc.Opts.Secure || nc.Opts.TLSConfig != nil { - jitter = nc.Opts.ReconnectJitterTLS - } - } - - for i := 0; len(nc.srvPool) > 0; { - cur, err := nc.selectNextServer() - if err != nil { - nc.err = err - break - } - - doSleep := i+1 >= len(nc.srvPool) - nc.mu.Unlock() - - if !doSleep { - i++ - // Release the lock to give a chance to a concurrent nc.Close() to break the loop. - runtime.Gosched() - } else { - i = 0 - var st time.Duration - if crd != nil { - wlf++ - st = crd(wlf) - } else { - st = rw - if jitter > 0 { - st += time.Duration(rand.Int63n(int64(jitter))) - } - } - if rt == nil { - rt = time.NewTimer(st) - } else { - rt.Reset(st) - } - select { - case <-rqch: - rt.Stop() - case <-rt.C: - } - } - // If the readLoop, etc.. go routines were started, wait for them to complete. - if waitForGoRoutines { - nc.waitForExits() - waitForGoRoutines = false - } - nc.mu.Lock() - - // Check if we have been closed first. - if nc.isClosed() { - break - } - - // Mark that we tried a reconnect - cur.reconnects++ - - // Try to create a new connection - err = nc.createConn() - - // Not yet connected, retry... - // Continue to hold the lock - if err != nil { - nc.err = nil - continue - } - - // We are reconnected - nc.Reconnects++ - - // Process connect logic - if nc.err = nc.processConnectInit(); nc.err != nil { - // Check if we should abort reconnect. If so, break out - // of the loop and connection will be closed. 
- if nc.ar { - break - } - nc.changeConnStatus(RECONNECTING) - continue - } - - // Clear possible lastErr under the connection lock after - // a successful processConnectInit(). - nc.current.lastErr = nil - - // Clear out server stats for the server we connected to.. - cur.didConnect = true - cur.reconnects = 0 - - // Send existing subscription state - nc.resendSubscriptions() - - // Now send off and clear pending buffer - nc.err = nc.flushReconnectPendingItems() - if nc.err != nil { - nc.changeConnStatus(RECONNECTING) - // Stop the ping timer (if set) - nc.stopPingTimer() - // Since processConnectInit() returned without error, the - // go routines were started, so wait for them to return - // on the next iteration (after releasing the lock). - waitForGoRoutines = true - continue - } - - // Done with the pending buffer - nc.bw.doneWithPending() - - // This is where we are truly connected. - nc.status = CONNECTED - - // If we are here with a retry on failed connect, indicate that the - // initial connect is now complete. - nc.initc = false - - // Queue up the reconnect callback. - if nc.Opts.ReconnectedCB != nil { - nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) }) - } - - // Release lock here, we will return below. - nc.mu.Unlock() - - // Make sure to flush everything - nc.Flush() - - return - } - - // Call into close.. We have no servers left.. - if nc.err == nil { - nc.err = ErrNoServers - } - nc.mu.Unlock() - nc.close(CLOSED, true, nil) -} - -// processOpErr handles errors from reading or parsing the protocol. -// The lock should not be held entering this function. 
-func (nc *Conn) processOpErr(err error) { - nc.mu.Lock() - if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { - nc.mu.Unlock() - return - } - - if nc.Opts.AllowReconnect && nc.status == CONNECTED { - // Set our new status - nc.changeConnStatus(RECONNECTING) - // Stop ping timer if set - nc.stopPingTimer() - if nc.conn != nil { - nc.conn.Close() - nc.conn = nil - } - - // Create pending buffer before reconnecting. - nc.bw.switchToPending() - - // Clear any queued pongs, e.g. pending flush calls. - nc.clearPendingFlushCalls() - - go nc.doReconnect(err) - nc.mu.Unlock() - return - } - - nc.changeConnStatus(DISCONNECTED) - nc.err = err - nc.mu.Unlock() - nc.close(CLOSED, true, nil) -} - -// dispatch is responsible for calling any async callbacks -func (ac *asyncCallbacksHandler) asyncCBDispatcher() { - for { - ac.mu.Lock() - // Protect for spurious wakeups. We should get out of the - // wait only if there is an element to pop from the list. - for ac.head == nil { - ac.cond.Wait() - } - cur := ac.head - ac.head = cur.next - if cur == ac.tail { - ac.tail = nil - } - ac.mu.Unlock() - - // This signals that the dispatcher has been closed and all - // previous callbacks have been dispatched. - if cur.f == nil { - return - } - // Invoke callback outside of handler's lock - cur.f() - } -} - -// Add the given function to the tail of the list and -// signals the dispatcher. -func (ac *asyncCallbacksHandler) push(f func()) { - ac.pushOrClose(f, false) -} - -// Signals that we are closing... -func (ac *asyncCallbacksHandler) close() { - ac.pushOrClose(nil, true) -} - -// Add the given function to the tail of the list and -// signals the dispatcher. -func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) { - ac.mu.Lock() - defer ac.mu.Unlock() - // Make sure that library is not calling push with nil function, - // since this is used to notify the dispatcher that it should stop. 
- if !close && f == nil { - panic("pushing a nil callback") - } - cb := &asyncCB{f: f} - if ac.tail != nil { - ac.tail.next = cb - } else { - ac.head = cb - } - ac.tail = cb - if close { - ac.cond.Broadcast() - } else { - ac.cond.Signal() - } -} - -// readLoop() will sit on the socket reading and processing the -// protocol from the server. It will dispatch appropriately based -// on the op type. -func (nc *Conn) readLoop() { - // Release the wait group on exit - defer nc.wg.Done() - - // Create a parseState if needed. - nc.mu.Lock() - if nc.ps == nil { - nc.ps = &parseState{} - } - conn := nc.conn - br := nc.br - nc.mu.Unlock() - - if conn == nil { - return - } - - for { - buf, err := br.Read() - if err == nil { - // With websocket, it is possible that there is no error but - // also no buffer returned (either WS control message or read of a - // partial compressed message). We could call parse(buf) which - // would ignore an empty buffer, but simply go back to top of the loop. - if len(buf) == 0 { - continue - } - err = nc.parse(buf) - } - if err != nil { - nc.processOpErr(err) - break - } - } - // Clear the parseState here.. - nc.mu.Lock() - nc.ps = nil - nc.mu.Unlock() -} - -// waitForMsgs waits on the conditional shared with readLoop and processMsg. -// It is used to deliver messages to asynchronous subscribers. -func (nc *Conn) waitForMsgs(s *Subscription) { - var closed bool - var delivered, max uint64 - - // Used to account for adjustments to sub.pBytes when we wrap back around. - msgLen := -1 - - for { - s.mu.Lock() - // Do accounting for last msg delivered here so we only lock once - // and drain state trips after callback has returned. 
- if msgLen >= 0 { - s.pMsgs-- - s.pBytes -= msgLen - msgLen = -1 - } - - if s.pHead == nil && !s.closed { - s.pCond.Wait() - } - // Pop the msg off the list - m := s.pHead - if m != nil { - s.pHead = m.next - if s.pHead == nil { - s.pTail = nil - } - if m.barrier != nil { - s.mu.Unlock() - if atomic.AddInt64(&m.barrier.refs, -1) == 0 { - m.barrier.f() - } - continue - } - msgLen = len(m.Data) - } - mcb := s.mcb - max = s.max - closed = s.closed - var fcReply string - if !s.closed { - s.delivered++ - delivered = s.delivered - if s.jsi != nil { - fcReply = s.checkForFlowControlResponse() - } - } - s.mu.Unlock() - - // Respond to flow control if applicable - if fcReply != _EMPTY_ { - nc.Publish(fcReply, nil) - } - - if closed { - break - } - - // Deliver the message. - if m != nil && (max == 0 || delivered <= max) { - mcb(m) - } - // If we have hit the max for delivered msgs, remove sub. - if max > 0 && delivered >= max { - nc.mu.Lock() - nc.removeSub(s) - nc.mu.Unlock() - break - } - } - // Check for barrier messages - s.mu.Lock() - for m := s.pHead; m != nil; m = s.pHead { - if m.barrier != nil { - s.mu.Unlock() - if atomic.AddInt64(&m.barrier.refs, -1) == 0 { - m.barrier.f() - } - s.mu.Lock() - } - s.pHead = m.next - } - // Now check for pDone - done := s.pDone - s.mu.Unlock() - - if done != nil { - done(s.Subject) - } -} - -// Used for debugging and simulating loss for certain tests. -// Return what is to be used. If we return nil the message will be dropped. 
-type msgFilter func(m *Msg) *Msg - -func (nc *Conn) addMsgFilter(subject string, filter msgFilter) { - nc.subsMu.Lock() - defer nc.subsMu.Unlock() - - if nc.filters == nil { - nc.filters = make(map[string]msgFilter) - } - nc.filters[subject] = filter -} - -func (nc *Conn) removeMsgFilter(subject string) { - nc.subsMu.Lock() - defer nc.subsMu.Unlock() - - if nc.filters != nil { - delete(nc.filters, subject) - if len(nc.filters) == 0 { - nc.filters = nil - } - } -} - -// processMsg is called by parse and will place the msg on the -// appropriate channel/pending queue for processing. If the channel is full, -// or the pending queue is over the pending limits, the connection is -// considered a slow consumer. -func (nc *Conn) processMsg(data []byte) { - // Stats - atomic.AddUint64(&nc.InMsgs, 1) - atomic.AddUint64(&nc.InBytes, uint64(len(data))) - - // Don't lock the connection to avoid server cutting us off if the - // flusher is holding the connection lock, trying to send to the server - // that is itself trying to send data to us. - nc.subsMu.RLock() - sub := nc.subs[nc.ps.ma.sid] - var mf msgFilter - if nc.filters != nil { - mf = nc.filters[string(nc.ps.ma.subject)] - } - nc.subsMu.RUnlock() - - if sub == nil { - return - } - - // Copy them into string - subj := string(nc.ps.ma.subject) - reply := string(nc.ps.ma.reply) - - // Doing message create outside of the sub's lock to reduce contention. - // It's possible that we end-up not using the message, but that's ok. - - // FIXME(dlc): Need to copy, should/can do COW? - var msgPayload = data - if !nc.ps.msgCopied { - msgPayload = make([]byte, len(data)) - copy(msgPayload, data) - } - - // Check if we have headers encoded here. 
- var h Header - var err error - var ctrlMsg bool - var ctrlType int - var fcReply string - - if nc.ps.ma.hdr > 0 { - hbuf := msgPayload[:nc.ps.ma.hdr] - msgPayload = msgPayload[nc.ps.ma.hdr:] - h, err = DecodeHeadersMsg(hbuf) - if err != nil { - // We will pass the message through but send async error. - nc.mu.Lock() - nc.err = ErrBadHeaderMsg - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) }) - } - nc.mu.Unlock() - } - } - - // FIXME(dlc): Should we recycle these containers? - m := &Msg{ - Subject: subj, - Reply: reply, - Header: h, - Data: msgPayload, - Sub: sub, - wsz: len(data) + len(subj) + len(reply), - } - - // Check for message filters. - if mf != nil { - if m = mf(m); m == nil { - // Drop message. - return - } - } - - sub.mu.Lock() - - // Check if closed. - if sub.closed { - sub.mu.Unlock() - return - } - - // Skip flow control messages in case of using a JetStream context. - jsi := sub.jsi - if jsi != nil { - // There has to be a header for it to be a control message. - if h != nil { - ctrlMsg, ctrlType = isJSControlMessage(m) - if ctrlMsg && ctrlType == jsCtrlHB { - // Check if the heartbeat has a "Consumer Stalled" header, if - // so, the value is the FC reply to send a nil message to. - // We will send it at the end of this function. - fcReply = m.Header.Get(consumerStalledHdr) - } - } - // Check for ordered consumer here. If checkOrderedMsgs returns true that means it detected a gap. - if !ctrlMsg && jsi.ordered && sub.checkOrderedMsgs(m) { - sub.mu.Unlock() - return - } - } - - // Skip processing if this is a control message and - // if not a pull consumer heartbeat. For pull consumers, - // heartbeats have to be handled on per request basis. 
- if !ctrlMsg || (jsi != nil && jsi.pull) { - var chanSubCheckFC bool - // Subscription internal stats (applicable only for non ChanSubscription's) - if sub.typ != ChanSubscription { - sub.pMsgs++ - if sub.pMsgs > sub.pMsgsMax { - sub.pMsgsMax = sub.pMsgs - } - sub.pBytes += len(m.Data) - if sub.pBytes > sub.pBytesMax { - sub.pBytesMax = sub.pBytes - } - - // Check for a Slow Consumer - if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || - (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { - goto slowConsumer - } - } else if jsi != nil { - chanSubCheckFC = true - } - - // We have two modes of delivery. One is the channel, used by channel - // subscribers and syncSubscribers, the other is a linked list for async. - if sub.mch != nil { - select { - case sub.mch <- m: - default: - goto slowConsumer - } - } else { - // Push onto the async pList - if sub.pHead == nil { - sub.pHead = m - sub.pTail = m - if sub.pCond != nil { - sub.pCond.Signal() - } - } else { - sub.pTail.next = m - sub.pTail = m - } - } - if jsi != nil { - // Store the ACK metadata from the message to - // compare later on with the received heartbeat. - sub.trackSequences(m.Reply) - if chanSubCheckFC { - // For ChanSubscription, since we can't call this when a message - // is "delivered" (since user is pull from their own channel), - // we have a go routine that does this check, however, we do it - // also here to make it much more responsive. The go routine is - // really to avoid stalling when there is no new messages coming. - fcReply = sub.checkForFlowControlResponse() - } - } - } else if ctrlType == jsCtrlFC && m.Reply != _EMPTY_ { - // This is a flow control message. - // We will schedule the send of the FC reply once we have delivered the - // DATA message that was received before this flow control message, which - // has sequence `jsi.fciseq`. However, it is possible that this message - // has already been delivered, in that case, we need to send the FC reply now. 
- if sub.getJSDelivered() >= jsi.fciseq { - fcReply = m.Reply - } else { - // Schedule a reply after the previous message is delivered. - sub.scheduleFlowControlResponse(m.Reply) - } - } - - // Clear any SlowConsumer status. - sub.sc = false - sub.mu.Unlock() - - if fcReply != _EMPTY_ { - nc.Publish(fcReply, nil) - } - - // Handle control heartbeat messages. - if ctrlMsg && ctrlType == jsCtrlHB && m.Reply == _EMPTY_ { - nc.checkForSequenceMismatch(m, sub, jsi) - } - - return - -slowConsumer: - sub.dropped++ - sc := !sub.sc - sub.sc = true - // Undo stats from above - if sub.typ != ChanSubscription { - sub.pMsgs-- - sub.pBytes -= len(m.Data) - } - sub.mu.Unlock() - if sc { - // Now we need connection's lock and we may end-up in the situation - // that we were trying to avoid, except that in this case, the client - // is already experiencing client-side slow consumer situation. - nc.mu.Lock() - nc.err = ErrSlowConsumer - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) }) - } - nc.mu.Unlock() - } -} - -// processPermissionsViolation is called when the server signals a subject -// permissions violation on either publish or subscribe. -func (nc *Conn) processPermissionsViolation(err string) { - nc.mu.Lock() - // create error here so we can pass it as a closure to the async cb dispatcher. - e := errors.New("nats: " + err) - nc.err = e - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) }) - } - nc.mu.Unlock() -} - -// processAuthError generally processing for auth errors. We want to do retries -// unless we get the same error again. This allows us for instance to swap credentials -// and have the app reconnect, but if nothing is changing we should bail. -// This function will return true if the connection should be closed, false otherwise. 
-// Connection lock is held on entry -func (nc *Conn) processAuthError(err error) bool { - nc.err = err - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - // We should give up if we tried twice on this server and got the - // same error. This behavior can be modified using IgnoreAuthErrorAbort. - if nc.current.lastErr == err && !nc.Opts.IgnoreAuthErrorAbort { - nc.ar = true - } else { - nc.current.lastErr = err - } - return nc.ar -} - -// flusher is a separate Go routine that will process flush requests for the write -// bufio. This allows coalescing of writes to the underlying socket. -func (nc *Conn) flusher() { - // Release the wait group - defer nc.wg.Done() - - // snapshot the bw and conn since they can change from underneath of us. - nc.mu.Lock() - bw := nc.bw - conn := nc.conn - fch := nc.fch - nc.mu.Unlock() - - if conn == nil || bw == nil { - return - } - - for { - if _, ok := <-fch; !ok { - return - } - nc.mu.Lock() - - // Check to see if we should bail out. - if !nc.isConnected() || nc.isConnecting() || conn != nc.conn { - nc.mu.Unlock() - return - } - if bw.buffered() > 0 { - if err := bw.flush(); err != nil { - if nc.err == nil { - nc.err = err - } - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - } - } - nc.mu.Unlock() - } -} - -// processPing will send an immediate pong protocol response to the -// server. The server uses this mechanism to detect dead clients. -func (nc *Conn) processPing() { - nc.sendProto(pongProto) -} - -// processPong is used to process responses to the client's ping -// messages. We use pings for the flush mechanism as well. -func (nc *Conn) processPong() { - var ch chan struct{} - - nc.mu.Lock() - if len(nc.pongs) > 0 { - ch = nc.pongs[0] - nc.pongs = append(nc.pongs[:0], nc.pongs[1:]...) 
- } - nc.pout = 0 - nc.mu.Unlock() - if ch != nil { - ch <- struct{}{} - } -} - -// processOK is a placeholder for processing OK messages. -func (nc *Conn) processOK() { - // do nothing -} - -// processInfo is used to parse the info messages sent -// from the server. -// This function may update the server pool. -func (nc *Conn) processInfo(info string) error { - if info == _EMPTY_ { - return nil - } - var ncInfo serverInfo - if err := json.Unmarshal([]byte(info), &ncInfo); err != nil { - return err - } - - // Copy content into connection's info structure. - nc.info = ncInfo - // The array could be empty/not present on initial connect, - // if advertise is disabled on that server, or servers that - // did not include themselves in the async INFO protocol. - // If empty, do not remove the implicit servers from the pool. - if len(nc.info.ConnectURLs) == 0 { - if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil { - nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) }) - } - return nil - } - // Note about pool randomization: when the pool was first created, - // it was randomized (if allowed). We keep the order the same (removing - // implicit servers that are no longer sent to us). New URLs are sent - // to us in no specific order so don't need extra randomization. - hasNew := false - // This is what we got from the server we are connected to. - urls := nc.info.ConnectURLs - // Transform that to a map for easy lookups - tmp := make(map[string]struct{}, len(urls)) - for _, curl := range urls { - tmp[curl] = struct{}{} - } - // Walk the pool and removed the implicit servers that are no longer in the - // given array/map - sp := nc.srvPool - for i := 0; i < len(sp); i++ { - srv := sp[i] - curl := srv.url.Host - // Check if this URL is in the INFO protocol - _, inInfo := tmp[curl] - // Remove from the temp map so that at the end we are left with only - // new (or restarted) servers that need to be added to the pool. 
- delete(tmp, curl) - // Keep servers that were set through Options, but also the one that - // we are currently connected to (even if it is a discovered server). - if !srv.isImplicit || srv.url == nc.current.url { - continue - } - if !inInfo { - // Remove from server pool. Keep current order. - copy(sp[i:], sp[i+1:]) - nc.srvPool = sp[:len(sp)-1] - sp = nc.srvPool - i-- - } - } - // Figure out if we should save off the current non-IP hostname if we encounter a bare IP. - saveTLS := nc.current != nil && !hostIsIP(nc.current.url) - - // If there are any left in the tmp map, these are new (or restarted) servers - // and need to be added to the pool. - for curl := range tmp { - // Before adding, check if this is a new (as in never seen) URL. - // This is used to figure out if we invoke the DiscoveredServersCB - if _, present := nc.urls[curl]; !present { - hasNew = true - } - nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS) - } - if hasNew { - // Randomize the pool if allowed but leave the first URL in place. - if !nc.Opts.NoRandomize { - nc.shufflePool(1) - } - if !nc.initc && nc.Opts.DiscoveredServersCB != nil { - nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) }) - } - } - if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil { - nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) }) - } - return nil -} - -// processAsyncInfo does the same than processInfo, but is called -// from the parser. Calls processInfo under connection's lock -// protection. -func (nc *Conn) processAsyncInfo(info []byte) { - nc.mu.Lock() - // Ignore errors, we will simply not update the server pool... - nc.processInfo(string(info)) - nc.mu.Unlock() -} - -// LastError reports the last error encountered via the connection. -// It can be used reliably within ClosedCB in order to find out reason -// why connection was closed for example. 
-func (nc *Conn) LastError() error { - if nc == nil { - return ErrInvalidConnection - } - nc.mu.RLock() - err := nc.err - nc.mu.RUnlock() - return err -} - -// Check if the given error string is an auth error, and if so returns -// the corresponding ErrXXX error, nil otherwise -func checkAuthError(e string) error { - if strings.HasPrefix(e, AUTHORIZATION_ERR) { - return ErrAuthorization - } - if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) { - return ErrAuthExpired - } - if strings.HasPrefix(e, AUTHENTICATION_REVOKED_ERR) { - return ErrAuthRevoked - } - if strings.HasPrefix(e, ACCOUNT_AUTHENTICATION_EXPIRED_ERR) { - return ErrAccountAuthExpired - } - return nil -} - -// processErr processes any error messages from the server and -// sets the connection's LastError. -func (nc *Conn) processErr(ie string) { - // Trim, remove quotes - ne := normalizeErr(ie) - // convert to lower case. - e := strings.ToLower(ne) - - close := false - - // FIXME(dlc) - process Slow Consumer signals special. - if e == STALE_CONNECTION { - nc.processOpErr(ErrStaleConnection) - } else if e == MAX_CONNECTIONS_ERR { - nc.processOpErr(ErrMaxConnectionsExceeded) - } else if strings.HasPrefix(e, PERMISSIONS_ERR) { - nc.processPermissionsViolation(ne) - } else if authErr := checkAuthError(e); authErr != nil { - nc.mu.Lock() - close = nc.processAuthError(authErr) - nc.mu.Unlock() - } else { - close = true - nc.mu.Lock() - nc.err = errors.New("nats: " + ne) - nc.mu.Unlock() - } - if close { - nc.close(CLOSED, true, nil) - } -} - -// kickFlusher will send a bool on a channel to kick the -// flush Go routine to flush data to the server. -func (nc *Conn) kickFlusher() { - if nc.bw != nil { - select { - case nc.fch <- struct{}{}: - default: - } - } -} - -// Publish publishes the data argument to the given subject. The data -// argument is left untouched and needs to be correctly interpreted on -// the receiver. 
-func (nc *Conn) Publish(subj string, data []byte) error { - return nc.publish(subj, _EMPTY_, nil, data) -} - -// Header represents the optional Header for a NATS message, -// based on the implementation of http.Header. -type Header map[string][]string - -// Add adds the key, value pair to the header. It is case-sensitive -// and appends to any existing values associated with key. -func (h Header) Add(key, value string) { - h[key] = append(h[key], value) -} - -// Set sets the header entries associated with key to the single -// element value. It is case-sensitive and replaces any existing -// values associated with key. -func (h Header) Set(key, value string) { - h[key] = []string{value} -} - -// Get gets the first value associated with the given key. -// It is case-sensitive. -func (h Header) Get(key string) string { - if h == nil { - return _EMPTY_ - } - if v := h[key]; v != nil { - return v[0] - } - return _EMPTY_ -} - -// Values returns all values associated with the given key. -// It is case-sensitive. -func (h Header) Values(key string) []string { - return h[key] -} - -// Del deletes the values associated with a key. -// It is case-sensitive. -func (h Header) Del(key string) { - delete(h, key) -} - -// NewMsg creates a message for publishing that will use headers. -func NewMsg(subject string) *Msg { - return &Msg{ - Subject: subject, - Header: make(Header), - } -} - -const ( - hdrLine = "NATS/1.0\r\n" - crlf = "\r\n" - hdrPreEnd = len(hdrLine) - len(crlf) - statusHdr = "Status" - descrHdr = "Description" - lastConsumerSeqHdr = "Nats-Last-Consumer" - lastStreamSeqHdr = "Nats-Last-Stream" - consumerStalledHdr = "Nats-Consumer-Stalled" - noResponders = "503" - noMessagesSts = "404" - reqTimeoutSts = "408" - jetStream409Sts = "409" - controlMsg = "100" - statusLen = 3 // e.g. 20x, 40x, 50x -) - -// DecodeHeadersMsg will decode and headers. 
-func DecodeHeadersMsg(data []byte) (Header, error) { - br := bufio.NewReaderSize(bytes.NewReader(data), 128) - tp := textproto.NewReader(br) - l, err := tp.ReadLine() - if err != nil || len(l) < hdrPreEnd || l[:hdrPreEnd] != hdrLine[:hdrPreEnd] { - return nil, ErrBadHeaderMsg - } - - mh, err := readMIMEHeader(tp) - if err != nil { - return nil, err - } - - // Check if we have an inlined status. - if len(l) > hdrPreEnd { - var description string - status := strings.TrimSpace(l[hdrPreEnd:]) - if len(status) != statusLen { - description = strings.TrimSpace(status[statusLen:]) - status = status[:statusLen] - } - mh.Add(statusHdr, status) - if len(description) > 0 { - mh.Add(descrHdr, description) - } - } - return Header(mh), nil -} - -// readMIMEHeader returns a MIMEHeader that preserves the -// original case of the MIME header, based on the implementation -// of textproto.ReadMIMEHeader. -// -// https://golang.org/pkg/net/textproto/#Reader.ReadMIMEHeader -func readMIMEHeader(tp *textproto.Reader) (textproto.MIMEHeader, error) { - m := make(textproto.MIMEHeader) - for { - kv, err := tp.ReadLine() - if len(kv) == 0 { - return m, err - } - - // Process key fetching original case. - i := bytes.IndexByte([]byte(kv), ':') - if i < 0 { - return nil, ErrBadHeaderMsg - } - key := kv[:i] - if key == "" { - // Skip empty keys. - continue - } - i++ - for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') { - i++ - } - value := string(kv[i:]) - m[key] = append(m[key], value) - if err != nil { - return m, err - } - } -} - -// PublishMsg publishes the Msg structure, which includes the -// Subject, an optional Reply and an optional Data field. -func (nc *Conn) PublishMsg(m *Msg) error { - if m == nil { - return ErrInvalidMsg - } - hdr, err := m.headerBytes() - if err != nil { - return err - } - return nc.publish(m.Subject, m.Reply, hdr, m.Data) -} - -// PublishRequest will perform a Publish() expecting a response on the -// reply subject. 
Use Request() for automatically waiting for a response -// inline. -func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { - return nc.publish(subj, reply, nil, data) -} - -// Used for handrolled Itoa -const digits = "0123456789" - -// publish is the internal function to publish messages to a nats-server. -// Sends a protocol data message by queuing into the bufio writer -// and kicking the flush go routine. These writes should be protected. -func (nc *Conn) publish(subj, reply string, hdr, data []byte) error { - if nc == nil { - return ErrInvalidConnection - } - if subj == "" { - return ErrBadSubject - } - nc.mu.Lock() - - // Check if headers attempted to be sent to server that does not support them. - if len(hdr) > 0 && !nc.info.Headers { - nc.mu.Unlock() - return ErrHeadersNotSupported - } - - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - - if nc.isDrainingPubs() { - nc.mu.Unlock() - return ErrConnectionDraining - } - - // Proactively reject payloads over the threshold set by server. - msgSize := int64(len(data) + len(hdr)) - // Skip this check if we are not yet connected (RetryOnFailedConnect) - if !nc.initc && msgSize > nc.info.MaxPayload { - nc.mu.Unlock() - return ErrMaxPayload - } - - // Check if we are reconnecting, and if so check if - // we have exceeded our reconnect outbound buffer limits. - if nc.bw.atLimitIfUsingPending() { - nc.mu.Unlock() - return ErrReconnectBufExceeded - } - - var mh []byte - if hdr != nil { - mh = nc.scratch[:len(_HPUB_P_)] - } else { - mh = nc.scratch[1:len(_HPUB_P_)] - } - mh = append(mh, subj...) - mh = append(mh, ' ') - if reply != "" { - mh = append(mh, reply...) - mh = append(mh, ' ') - } - - // We could be smarter here, but simple loop is ok, - // just avoid strconv in fast path. - // FIXME(dlc) - Find a better way here. - // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) - // go 1.14 some values strconv faster, may be able to switch over. 
- - var b [12]byte - var i = len(b) - - if hdr != nil { - if len(hdr) > 0 { - for l := len(hdr); l > 0; l /= 10 { - i-- - b[i] = digits[l%10] - } - } else { - i-- - b[i] = digits[0] - } - mh = append(mh, b[i:]...) - mh = append(mh, ' ') - // reset for below. - i = len(b) - } - - if msgSize > 0 { - for l := msgSize; l > 0; l /= 10 { - i-- - b[i] = digits[l%10] - } - } else { - i-- - b[i] = digits[0] - } - - mh = append(mh, b[i:]...) - mh = append(mh, _CRLF_...) - - if err := nc.bw.appendBufs(mh, hdr, data, _CRLF_BYTES_); err != nil { - nc.mu.Unlock() - return err - } - - nc.OutMsgs++ - nc.OutBytes += uint64(len(data) + len(hdr)) - - if len(nc.fch) == 0 { - nc.kickFlusher() - } - nc.mu.Unlock() - return nil -} - -// respHandler is the global response handler. It will look up -// the appropriate channel based on the last token and place -// the message on the channel if possible. -func (nc *Conn) respHandler(m *Msg) { - nc.mu.Lock() - - // Just return if closed. - if nc.isClosed() { - nc.mu.Unlock() - return - } - - var mch chan *Msg - - // Grab mch - rt := nc.respToken(m.Subject) - if rt != _EMPTY_ { - mch = nc.respMap[rt] - // Delete the key regardless, one response only. - delete(nc.respMap, rt) - } else if len(nc.respMap) == 1 { - // If the server has rewritten the subject, the response token (rt) - // will not match (could be the case with JetStream). If that is the - // case and there is a single entry, use that. - for k, v := range nc.respMap { - mch = v - delete(nc.respMap, k) - break - } - } - nc.mu.Unlock() - - // Don't block, let Request timeout instead, mch is - // buffered and we should delete the key before a - // second response is processed. - select { - case mch <- m: - default: - return - } -} - -// Helper to setup and send new request style requests. Return the chan to receive the response. -func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) { - nc.mu.Lock() - // Do setup for the new style if needed. 
- if nc.respMap == nil { - nc.initNewResp() - } - // Create new literal Inbox and map to a chan msg. - mch := make(chan *Msg, RequestChanLen) - respInbox := nc.newRespInbox() - token := respInbox[nc.respSubLen:] - - nc.respMap[token] = mch - if nc.respMux == nil { - // Create the response subscription we will use for all new style responses. - // This will be on an _INBOX with an additional terminal token. The subscription - // will be on a wildcard. - s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false, nil) - if err != nil { - nc.mu.Unlock() - return nil, token, err - } - nc.respScanf = strings.Replace(nc.respSub, "*", "%s", -1) - nc.respMux = s - } - nc.mu.Unlock() - - if err := nc.publish(subj, respInbox, hdr, data); err != nil { - return nil, token, err - } - - return mch, token, nil -} - -// RequestMsg will send a request payload including optional headers and deliver -// the response message, or an error, including a timeout if no message was received properly. -func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) { - if msg == nil { - return nil, ErrInvalidMsg - } - hdr, err := msg.headerBytes() - if err != nil { - return nil, err - } - - return nc.request(msg.Subject, hdr, msg.Data, timeout) -} - -// Request will send a request payload and deliver the response message, -// or an error, including a timeout if no message was received properly. 
-func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { - return nc.request(subj, nil, data, timeout) -} - -func (nc *Conn) useOldRequestStyle() bool { - nc.mu.RLock() - r := nc.Opts.UseOldRequestStyle - nc.mu.RUnlock() - return r -} - -func (nc *Conn) request(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - - var m *Msg - var err error - - if nc.useOldRequestStyle() { - m, err = nc.oldRequest(subj, hdr, data, timeout) - } else { - m, err = nc.newRequest(subj, hdr, data, timeout) - } - - // Check for no responder status. - if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - m, err = nil, ErrNoResponders - } - return m, err -} - -func (nc *Conn) newRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { - mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) - if err != nil { - return nil, err - } - - t := globalTimerPool.Get(timeout) - defer globalTimerPool.Put(t) - - var ok bool - var msg *Msg - - select { - case msg, ok = <-mch: - if !ok { - return nil, ErrConnectionClosed - } - case <-t.C: - nc.mu.Lock() - delete(nc.respMap, token) - nc.mu.Unlock() - return nil, ErrTimeout - } - - return msg, nil -} - -// oldRequest will create an Inbox and perform a Request() call -// with the Inbox reply and return the first reply received. -// This is optimized for the case of multiple responses. -func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { - inbox := nc.NewInbox() - ch := make(chan *Msg, RequestChanLen) - - s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) - if err != nil { - return nil, err - } - s.AutoUnsubscribe(1) - defer s.Unsubscribe() - - err = nc.publish(subj, inbox, hdr, data) - if err != nil { - return nil, err - } - - return s.NextMsg(timeout) -} - -// InboxPrefix is the prefix for all inbox subjects. 
-const ( - InboxPrefix = "_INBOX." - inboxPrefixLen = len(InboxPrefix) - replySuffixLen = 8 // Gives us 62^8 - rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - base = 62 -) - -// NewInbox will return an inbox string which can be used for directed replies from -// subscribers. These are guaranteed to be unique, but can be shared and subscribed -// to by others. -func NewInbox() string { - var b [inboxPrefixLen + nuidSize]byte - pres := b[:inboxPrefixLen] - copy(pres, InboxPrefix) - ns := b[inboxPrefixLen:] - copy(ns, nuid.Next()) - return string(b[:]) -} - -// Create a new inbox that is prefix aware. -func (nc *Conn) NewInbox() string { - if nc.Opts.InboxPrefix == _EMPTY_ { - return NewInbox() - } - - var sb strings.Builder - sb.WriteString(nc.Opts.InboxPrefix) - sb.WriteByte('.') - sb.WriteString(nuid.Next()) - return sb.String() -} - -// Function to init new response structures. -func (nc *Conn) initNewResp() { - nc.respSubPrefix = fmt.Sprintf("%s.", nc.NewInbox()) - nc.respSubLen = len(nc.respSubPrefix) - nc.respSub = fmt.Sprintf("%s*", nc.respSubPrefix) - nc.respMap = make(map[string]chan *Msg) - nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -// newRespInbox creates a new literal response subject -// that will trigger the mux subscription handler. -// Lock should be held. -func (nc *Conn) newRespInbox() string { - if nc.respMap == nil { - nc.initNewResp() - } - - var sb strings.Builder - sb.WriteString(nc.respSubPrefix) - - rn := nc.respRand.Int63() - for i := 0; i < replySuffixLen; i++ { - sb.WriteByte(rdigits[rn%base]) - rn /= base - } - - return sb.String() -} - -// NewRespInbox is the new format used for _INBOX. -func (nc *Conn) NewRespInbox() string { - nc.mu.Lock() - s := nc.newRespInbox() - nc.mu.Unlock() - return s -} - -// respToken will return the last token of a literal response inbox -// which we use for the message channel lookup. 
This needs to do a -// scan to protect itself against the server changing the subject. -// Lock should be held. -func (nc *Conn) respToken(respInbox string) string { - var token string - n, err := fmt.Sscanf(respInbox, nc.respScanf, &token) - if err != nil || n != 1 { - return "" - } - return token -} - -// Subscribe will express interest in the given subject. The subject -// can have wildcards. -// There are two type of wildcards: * for partial, and > for full. -// A subscription on subject time.*.east would receive messages sent to time.us.east and time.eu.east. -// A subscription on subject time.us.> would receive messages sent to -// time.us.east and time.us.east.atlanta, while time.us.* would only match time.us.east -// since it can't match more than one token. -// Messages will be delivered to the associated MsgHandler. -func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { - return nc.subscribe(subj, _EMPTY_, cb, nil, false, nil) -} - -// ChanSubscribe will express interest in the given subject and place -// all messages received on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, _EMPTY_, nil, ch, false, nil) -} - -// ChanQueueSubscribe will express interest in the given subject. -// All subscribers with the same queue name will form the queue group -// and only one member of the group will be selected to receive any given message, -// which will be placed on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -// Note: This is the same than QueueSubscribeSyncWithChan. -func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, group, nil, ch, false, nil) -} - -// SubscribeSync will express interest on the given subject. 
Messages will -// be received synchronously using Subscription.NextMsg(). -func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - mch := make(chan *Msg, nc.Opts.SubChanLen) - return nc.subscribe(subj, _EMPTY_, nil, mch, true, nil) -} - -// QueueSubscribe creates an asynchronous queue subscriber on the given subject. -// All subscribers with the same queue name will form the queue group and -// only one member of the group will be selected to receive any given -// message asynchronously. -func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { - return nc.subscribe(subj, queue, cb, nil, false, nil) -} - -// QueueSubscribeSync creates a synchronous queue subscriber on the given -// subject. All subscribers with the same queue name will form the queue -// group and only one member of the group will be selected to receive any -// given message synchronously using Subscription.NextMsg(). -func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { - mch := make(chan *Msg, nc.Opts.SubChanLen) - return nc.subscribe(subj, queue, nil, mch, true, nil) -} - -// QueueSubscribeSyncWithChan will express interest in the given subject. -// All subscribers with the same queue name will form the queue group -// and only one member of the group will be selected to receive any given message, -// which will be placed on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -// Note: This is the same than ChanQueueSubscribe. -func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, queue, nil, ch, false, nil) -} - -// badSubject will do quick test on whether a subject is acceptable. -// Spaces are not allowed and all tokens should be > 0 in len. 
-func badSubject(subj string) bool { - if strings.ContainsAny(subj, " \t\r\n") { - return true - } - tokens := strings.Split(subj, ".") - for _, t := range tokens { - if len(t) == 0 { - return true - } - } - return false -} - -// badQueue will check a queue name for whitespace. -func badQueue(qname string) bool { - return strings.ContainsAny(qname, " \t\r\n") -} - -// subscribe is the internal subscribe function that indicates interest in a subject. -func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.subscribeLocked(subj, queue, cb, ch, isSync, js) -} - -func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - if badSubject(subj) { - return nil, ErrBadSubject - } - if queue != _EMPTY_ && badQueue(queue) { - return nil, ErrBadQueueName - } - - // Check for some error conditions. - if nc.isClosed() { - return nil, ErrConnectionClosed - } - if nc.isDraining() { - return nil, ErrConnectionDraining - } - - if cb == nil && ch == nil { - return nil, ErrBadSubscription - } - - sub := &Subscription{ - Subject: subj, - Queue: queue, - mcb: cb, - conn: nc, - jsi: js, - } - // Set pending limits. - if ch != nil { - sub.pMsgsLimit = cap(ch) - } else { - sub.pMsgsLimit = DefaultSubPendingMsgsLimit - } - sub.pBytesLimit = DefaultSubPendingBytesLimit - - // If we have an async callback, start up a sub specific - // Go routine to deliver the messages. 
- var sr bool - if cb != nil { - sub.typ = AsyncSubscription - sub.pCond = sync.NewCond(&sub.mu) - sr = true - } else if !isSync { - sub.typ = ChanSubscription - sub.mch = ch - } else { // Sync Subscription - sub.typ = SyncSubscription - sub.mch = ch - } - - nc.subsMu.Lock() - nc.ssid++ - sub.sid = nc.ssid - nc.subs[sub.sid] = sub - nc.subsMu.Unlock() - - // Let's start the go routine now that it is fully setup and registered. - if sr { - go nc.waitForMsgs(sub) - } - - // We will send these for all subs when we reconnect - // so that we can suppress here if reconnecting. - if !nc.isReconnecting() { - nc.bw.appendString(fmt.Sprintf(subProto, subj, queue, sub.sid)) - nc.kickFlusher() - } - - return sub, nil -} - -// NumSubscriptions returns active number of subscriptions. -func (nc *Conn) NumSubscriptions() int { - nc.mu.RLock() - defer nc.mu.RUnlock() - return len(nc.subs) -} - -// Lock for nc should be held here upon entry -func (nc *Conn) removeSub(s *Subscription) { - nc.subsMu.Lock() - delete(nc.subs, s.sid) - nc.subsMu.Unlock() - s.mu.Lock() - defer s.mu.Unlock() - // Release callers on NextMsg for SyncSubscription only - if s.mch != nil && s.typ == SyncSubscription { - close(s.mch) - } - s.mch = nil - - // If JS subscription then stop HB timer. - if jsi := s.jsi; jsi != nil { - if jsi.hbc != nil { - jsi.hbc.Stop() - jsi.hbc = nil - } - if jsi.csfct != nil { - jsi.csfct.Stop() - jsi.csfct = nil - } - } - - // Mark as invalid - s.closed = true - if s.pCond != nil { - s.pCond.Broadcast() - } -} - -// SubscriptionType is the type of the Subscription. -type SubscriptionType int - -// The different types of subscription types. -const ( - AsyncSubscription = SubscriptionType(iota) - SyncSubscription - ChanSubscription - NilSubscription - PullSubscription -) - -// Type returns the type of Subscription. 
-func (s *Subscription) Type() SubscriptionType { - if s == nil { - return NilSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - // Pull subscriptions are really a SyncSubscription and we want this - // type to be set internally for all delivered messages management, etc.. - // So check when to return PullSubscription to the user. - if s.jsi != nil && s.jsi.pull { - return PullSubscription - } - return s.typ -} - -// IsValid returns a boolean indicating whether the subscription -// is still active. This will return false if the subscription has -// already been closed. -func (s *Subscription) IsValid() bool { - if s == nil { - return false - } - s.mu.Lock() - defer s.mu.Unlock() - return s.conn != nil && !s.closed -} - -// Drain will remove interest but continue callbacks until all messages -// have been processed. -// -// For a JetStream subscription, if the library has created the JetStream -// consumer, the library will send a DeleteConsumer request to the server -// when the Drain operation completes. If a failure occurs when deleting -// the JetStream consumer, an error will be reported to the asynchronous -// error callback. -// If you do not wish the JetStream consumer to be automatically deleted, -// ensure that the consumer is not created by the library, which means -// create the consumer with AddConsumer and bind to this consumer. -func (s *Subscription) Drain() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - s.mu.Unlock() - if conn == nil { - return ErrBadSubscription - } - return conn.unsubscribe(s, 0, true) -} - -// Unsubscribe will remove interest in the given subject. -// -// For a JetStream subscription, if the library has created the JetStream -// consumer, it will send a DeleteConsumer request to the server (if the -// unsubscribe itself was successful). If the delete operation fails, the -// error will be returned. 
-// If you do not wish the JetStream consumer to be automatically deleted, -// ensure that the consumer is not created by the library, which means -// create the consumer with AddConsumer and bind to this consumer (using -// the nats.Bind() option). -func (s *Subscription) Unsubscribe() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - closed := s.closed - dc := s.jsi != nil && s.jsi.dc - s.mu.Unlock() - if conn == nil || conn.IsClosed() { - return ErrConnectionClosed - } - if closed { - return ErrBadSubscription - } - if conn.IsDraining() { - return ErrConnectionDraining - } - err := conn.unsubscribe(s, 0, false) - if err == nil && dc { - err = s.deleteConsumer() - } - return err -} - -// checkDrained will watch for a subscription to be fully drained -// and then remove it. -func (nc *Conn) checkDrained(sub *Subscription) { - if nc == nil || sub == nil { - return - } - - // This allows us to know that whatever we have in the client pending - // is correct and the server will not send additional information. - nc.Flush() - - sub.mu.Lock() - // For JS subscriptions, check if we are going to delete the - // JS consumer when drain completes. - dc := sub.jsi != nil && sub.jsi.dc - sub.mu.Unlock() - - // Once we are here we just wait for Pending to reach 0 or - // any other state to exit this go routine. - for { - // check connection is still valid. 
- if nc.IsClosed() { - return - } - - // Check subscription state - sub.mu.Lock() - conn := sub.conn - closed := sub.closed - pMsgs := sub.pMsgs - sub.mu.Unlock() - - if conn == nil || closed || pMsgs == 0 { - nc.mu.Lock() - nc.removeSub(sub) - nc.mu.Unlock() - if dc { - if err := sub.deleteConsumer(); err != nil { - nc.mu.Lock() - if errCB := nc.Opts.AsyncErrorCB; errCB != nil { - nc.ach.push(func() { errCB(nc, sub, err) }) - } - nc.mu.Unlock() - } - } - return - } - - time.Sleep(100 * time.Millisecond) - } -} - -// AutoUnsubscribe will issue an automatic Unsubscribe that is -// processed by the server when max messages have been received. -// This can be useful when sending a request to an unknown number -// of subscribers. -func (s *Subscription) AutoUnsubscribe(max int) error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - closed := s.closed - s.mu.Unlock() - if conn == nil || closed { - return ErrBadSubscription - } - return conn.unsubscribe(s, max, false) -} - -// SetClosedHandler will set the closed handler for when a subscription -// is closed (either unsubscribed or drained). -func (s *Subscription) SetClosedHandler(handler func(subject string)) { - s.mu.Lock() - s.pDone = handler - s.mu.Unlock() -} - -// unsubscribe performs the low level unsubscribe to the server. 
-// Use Subscription.Unsubscribe() -func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error { - var maxStr string - if max > 0 { - sub.mu.Lock() - sub.max = uint64(max) - if sub.delivered < sub.max { - maxStr = strconv.Itoa(max) - } - sub.mu.Unlock() - } - - nc.mu.Lock() - // ok here, but defer is expensive - defer nc.mu.Unlock() - - if nc.isClosed() { - return ErrConnectionClosed - } - - nc.subsMu.RLock() - s := nc.subs[sub.sid] - nc.subsMu.RUnlock() - // Already unsubscribed - if s == nil { - return nil - } - - if maxStr == _EMPTY_ && !drainMode { - nc.removeSub(s) - } - - if drainMode { - go nc.checkDrained(sub) - } - - // We will send these for all subs when we reconnect - // so that we can suppress here. - if !nc.isReconnecting() { - nc.bw.appendString(fmt.Sprintf(unsubProto, s.sid, maxStr)) - nc.kickFlusher() - } - - // For JetStream subscriptions cancel the attached context if there is any. - var cancel func() - sub.mu.Lock() - jsi := sub.jsi - if jsi != nil { - cancel = jsi.cancel - jsi.cancel = nil - } - sub.mu.Unlock() - if cancel != nil { - cancel() - } - - return nil -} - -// NextMsg will return the next message available to a synchronous subscriber -// or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription), -// the connection is closed (ErrConnectionClosed), the timeout is reached (ErrTimeout), -// or if there were no responders (ErrNoResponders) when used in the context of a request/reply. -func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { - if s == nil { - return nil, ErrBadSubscription - } - - s.mu.Lock() - err := s.validateNextMsgState(false) - if err != nil { - s.mu.Unlock() - return nil, err - } - - // snapshot - mch := s.mch - s.mu.Unlock() - - var ok bool - var msg *Msg - - // If something is available right away, let's optimize that case. 
- select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } else { - return msg, nil - } - default: - } - - // If we are here a message was not immediately available, so lets loop - // with a timeout. - - t := globalTimerPool.Get(timeout) - defer globalTimerPool.Put(t) - - select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } - case <-t.C: - return nil, ErrTimeout - } - - return msg, nil -} - -// validateNextMsgState checks whether the subscription is in a valid -// state to call NextMsg and be delivered another message synchronously. -// This should be called while holding the lock. -func (s *Subscription) validateNextMsgState(pullSubInternal bool) error { - if s.connClosed { - return ErrConnectionClosed - } - if s.mch == nil { - if s.max > 0 && s.delivered >= s.max { - return ErrMaxMessages - } else if s.closed { - return ErrBadSubscription - } - } - if s.mcb != nil { - return ErrSyncSubRequired - } - if s.sc { - s.sc = false - return ErrSlowConsumer - } - // Unless this is from an internal call, reject use of this API. - // Users should use Fetch() instead. - if !pullSubInternal && s.jsi != nil && s.jsi.pull { - return ErrTypeSubscription - } - return nil -} - -// This is called when the sync channel has been closed. -// The error returned will be either connection or subscription -// closed depending on what caused NextMsg() to fail. -func (s *Subscription) getNextMsgErr() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.connClosed { - return ErrConnectionClosed - } - return ErrBadSubscription -} - -// processNextMsgDelivered takes a message and applies the needed -// accounting to the stats from the subscription, returning an -// error in case we have the maximum number of messages have been -// delivered already. 
It should not be called while holding the lock. -func (s *Subscription) processNextMsgDelivered(msg *Msg) error { - s.mu.Lock() - nc := s.conn - max := s.max - - var fcReply string - // Update some stats. - s.delivered++ - delivered := s.delivered - if s.jsi != nil { - fcReply = s.checkForFlowControlResponse() - } - - if s.typ == SyncSubscription { - s.pMsgs-- - s.pBytes -= len(msg.Data) - } - s.mu.Unlock() - - if fcReply != _EMPTY_ { - nc.Publish(fcReply, nil) - } - - if max > 0 { - if delivered > max { - return ErrMaxMessages - } - // Remove subscription if we have reached max. - if delivered == max { - nc.mu.Lock() - nc.removeSub(s) - nc.mu.Unlock() - } - } - if len(msg.Data) == 0 && msg.Header.Get(statusHdr) == noResponders { - return ErrNoResponders - } - - return nil -} - -// Queued returns the number of queued messages in the client for this subscription. -// DEPRECATED: Use Pending() -func (s *Subscription) QueuedMsgs() (int, error) { - m, _, err := s.Pending() - return int(m), err -} - -// Pending returns the number of queued messages and queued bytes in the client for this subscription. -func (s *Subscription) Pending() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgs, s.pBytes, nil -} - -// MaxPending returns the maximum number of queued messages and queued bytes seen so far. -func (s *Subscription) MaxPending() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgsMax, s.pBytesMax, nil -} - -// ClearMaxPending resets the maximums seen so far. 
-func (s *Subscription) ClearMaxPending() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return ErrBadSubscription - } - if s.typ == ChanSubscription { - return ErrTypeSubscription - } - s.pMsgsMax, s.pBytesMax = 0, 0 - return nil -} - -// Pending Limits -const ( - // DefaultSubPendingMsgsLimit will be 512k msgs. - DefaultSubPendingMsgsLimit = 512 * 1024 - // DefaultSubPendingBytesLimit is 64MB - DefaultSubPendingBytesLimit = 64 * 1024 * 1024 -) - -// PendingLimits returns the current limits for this subscription. -// If no error is returned, a negative value indicates that the -// given metric is not limited. -func (s *Subscription) PendingLimits() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgsLimit, s.pBytesLimit, nil -} - -// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. -// Zero is not allowed. Any negative value means that the given metric is not limited. -func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return ErrBadSubscription - } - if s.typ == ChanSubscription { - return ErrTypeSubscription - } - if msgLimit == 0 || bytesLimit == 0 { - return ErrInvalidArg - } - s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit - return nil -} - -// Delivered returns the number of delivered messages for this subscription. 
-func (s *Subscription) Delivered() (int64, error) { - if s == nil { - return -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, ErrBadSubscription - } - return int64(s.delivered), nil -} - -// Dropped returns the number of known dropped messages for this subscription. -// This will correspond to messages dropped by violations of PendingLimits. If -// the server declares the connection a SlowConsumer, this number may not be -// valid. -func (s *Subscription) Dropped() (int, error) { - if s == nil { - return -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, ErrBadSubscription - } - return s.dropped, nil -} - -// Respond allows a convenient way to respond to requests in service based subscriptions. -func (m *Msg) Respond(data []byte) error { - if m == nil || m.Sub == nil { - return ErrMsgNotBound - } - if m.Reply == "" { - return ErrMsgNoReply - } - m.Sub.mu.Lock() - nc := m.Sub.conn - m.Sub.mu.Unlock() - // No need to check the connection here since the call to publish will do all the checking. - return nc.Publish(m.Reply, data) -} - -// RespondMsg allows a convenient way to respond to requests in service based subscriptions that might include headers -func (m *Msg) RespondMsg(msg *Msg) error { - if m == nil || m.Sub == nil { - return ErrMsgNotBound - } - if m.Reply == "" { - return ErrMsgNoReply - } - msg.Subject = m.Reply - m.Sub.mu.Lock() - nc := m.Sub.conn - m.Sub.mu.Unlock() - // No need to check the connection here since the call to publish will do all the checking. - return nc.PublishMsg(msg) -} - -// FIXME: This is a hack -// removeFlushEntry is needed when we need to discard queued up responses -// for our pings as part of a flush call. This happens when we have a flush -// call outstanding and we call close. 
-func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.pongs == nil { - return false - } - for i, c := range nc.pongs { - if c == ch { - nc.pongs[i] = nil - return true - } - } - return false -} - -// The lock must be held entering this function. -func (nc *Conn) sendPing(ch chan struct{}) { - nc.pongs = append(nc.pongs, ch) - nc.bw.appendString(pingProto) - // Flush in place. - nc.bw.flush() -} - -// This will fire periodically and send a client origin -// ping to the server. Will also check that we have received -// responses from the server. -func (nc *Conn) processPingTimer() { - nc.mu.Lock() - - if nc.status != CONNECTED { - nc.mu.Unlock() - return - } - - // Check for violation - nc.pout++ - if nc.pout > nc.Opts.MaxPingsOut { - nc.mu.Unlock() - nc.processOpErr(ErrStaleConnection) - return - } - - nc.sendPing(nil) - nc.ptmr.Reset(nc.Opts.PingInterval) - nc.mu.Unlock() -} - -// FlushTimeout allows a Flush operation to have an associated timeout. -func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { - if nc == nil { - return ErrInvalidConnection - } - if timeout <= 0 { - return ErrBadTimeout - } - - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - t := globalTimerPool.Get(timeout) - defer globalTimerPool.Put(t) - - // Create a buffered channel to prevent chan send to block - // in processPong() if this code here times out just when - // PONG was received. - ch := make(chan struct{}, 1) - nc.sendPing(ch) - nc.mu.Unlock() - - select { - case _, ok := <-ch: - if !ok { - err = ErrConnectionClosed - } else { - close(ch) - } - case <-t.C: - err = ErrTimeout - } - - if err != nil { - nc.removeFlushEntry(ch) - } - return -} - -// RTT calculates the round trip time between this client and the server. 
-func (nc *Conn) RTT() (time.Duration, error) { - if nc.IsClosed() { - return 0, ErrConnectionClosed - } - if nc.IsReconnecting() { - return 0, ErrDisconnected - } - start := time.Now() - if err := nc.FlushTimeout(10 * time.Second); err != nil { - return 0, err - } - return time.Since(start), nil -} - -// Flush will perform a round trip to the server and return when it -// receives the internal reply. -func (nc *Conn) Flush() error { - return nc.FlushTimeout(10 * time.Second) -} - -// Buffered will return the number of bytes buffered to be sent to the server. -// FIXME(dlc) take into account disconnected state. -func (nc *Conn) Buffered() (int, error) { - nc.mu.RLock() - defer nc.mu.RUnlock() - if nc.isClosed() || nc.bw == nil { - return -1, ErrConnectionClosed - } - return nc.bw.buffered(), nil -} - -// resendSubscriptions will send our subscription state back to the -// server. Used in reconnects -func (nc *Conn) resendSubscriptions() { - // Since we are going to send protocols to the server, we don't want to - // be holding the subsMu lock (which is used in processMsg). So copy - // the subscriptions in a temporary array. - nc.subsMu.RLock() - subs := make([]*Subscription, 0, len(nc.subs)) - for _, s := range nc.subs { - subs = append(subs, s) - } - nc.subsMu.RUnlock() - for _, s := range subs { - adjustedMax := uint64(0) - s.mu.Lock() - if s.max > 0 { - if s.delivered < s.max { - adjustedMax = s.max - s.delivered - } - // adjustedMax could be 0 here if the number of delivered msgs - // reached the max, if so unsubscribe. 
- if adjustedMax == 0 { - s.mu.Unlock() - nc.bw.writeDirect(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) - continue - } - } - subj, queue, sid := s.Subject, s.Queue, s.sid - s.mu.Unlock() - - nc.bw.writeDirect(fmt.Sprintf(subProto, subj, queue, sid)) - if adjustedMax > 0 { - maxStr := strconv.Itoa(int(adjustedMax)) - nc.bw.writeDirect(fmt.Sprintf(unsubProto, sid, maxStr)) - } - } -} - -// This will clear any pending flush calls and release pending calls. -// Lock is assumed to be held by the caller. -func (nc *Conn) clearPendingFlushCalls() { - // Clear any queued pongs, e.g. pending flush calls. - for _, ch := range nc.pongs { - if ch != nil { - close(ch) - } - } - nc.pongs = nil -} - -// This will clear any pending Request calls. -// Lock is assumed to be held by the caller. -func (nc *Conn) clearPendingRequestCalls() { - if nc.respMap == nil { - return - } - for key, ch := range nc.respMap { - if ch != nil { - close(ch) - delete(nc.respMap, key) - } - } -} - -// Low level close call that will do correct cleanup and set -// desired status. Also controls whether user defined callbacks -// will be triggered. The lock should not be held entering this -// function. This function will handle the locking manually. -func (nc *Conn) close(status Status, doCBs bool, err error) { - nc.mu.Lock() - if nc.isClosed() { - nc.status = status - nc.mu.Unlock() - return - } - nc.status = CLOSED - - // Kick the Go routines so they fall out. - nc.kickFlusher() - - // If the reconnect timer is waiting between a reconnect attempt, - // this will kick it out. - if nc.rqch != nil { - close(nc.rqch) - nc.rqch = nil - } - - // Clear any queued pongs, e.g. pending flush calls. - nc.clearPendingFlushCalls() - - // Clear any queued and blocking Requests. - nc.clearPendingRequestCalls() - - // Stop ping timer if set. 
- nc.stopPingTimer() - nc.ptmr = nil - - // Need to close and set TCP conn to nil if reconnect loop has stopped, - // otherwise we would incorrectly invoke Disconnect handler (if set) - // down below. - if nc.ar && nc.conn != nil { - nc.conn.Close() - nc.conn = nil - } else if nc.conn != nil { - // Go ahead and make sure we have flushed the outbound - nc.bw.flush() - defer nc.conn.Close() - } - - // Close sync subscriber channels and release any - // pending NextMsg() calls. - nc.subsMu.Lock() - for _, s := range nc.subs { - s.mu.Lock() - - // Release callers on NextMsg for SyncSubscription only - if s.mch != nil && s.typ == SyncSubscription { - close(s.mch) - } - s.mch = nil - // Mark as invalid, for signaling to waitForMsgs - s.closed = true - // Mark connection closed in subscription - s.connClosed = true - // If we have an async subscription, signals it to exit - if s.typ == AsyncSubscription && s.pCond != nil { - s.pCond.Signal() - } - - s.mu.Unlock() - } - nc.subs = nil - nc.subsMu.Unlock() - - nc.changeConnStatus(status) - - // Perform appropriate callback if needed for a disconnect. - if doCBs { - if nc.conn != nil { - if disconnectedErrCB := nc.Opts.DisconnectedErrCB; disconnectedErrCB != nil { - nc.ach.push(func() { disconnectedErrCB(nc, err) }) - } else if disconnectedCB := nc.Opts.DisconnectedCB; disconnectedCB != nil { - nc.ach.push(func() { disconnectedCB(nc) }) - } - } - if nc.Opts.ClosedCB != nil { - nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) - } - } - // If this is terminal, then we have to notify the asyncCB handler that - // it can exit once all async callbacks have been dispatched. - if status == CLOSED { - nc.ach.close() - } - nc.mu.Unlock() -} - -// Close will close the connection to the server. This call will release -// all blocking calls, such as Flush() and NextMsg() -func (nc *Conn) Close() { - if nc != nil { - // This will be a no-op if the connection was not websocket. 
- // We do this here as opposed to inside close() because we want - // to do this only for the final user-driven close of the client. - // Otherwise, we would need to change close() to pass a boolean - // indicating that this is the case. - nc.wsClose() - nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil) - } -} - -// IsClosed tests if a Conn has been closed. -func (nc *Conn) IsClosed() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isClosed() -} - -// IsReconnecting tests if a Conn is reconnecting. -func (nc *Conn) IsReconnecting() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isReconnecting() -} - -// IsConnected tests if a Conn is connected. -func (nc *Conn) IsConnected() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isConnected() -} - -// drainConnection will run in a separate Go routine and will -// flush all publishes and drain all active subscriptions. -func (nc *Conn) drainConnection() { - // Snapshot subs list. - nc.mu.Lock() - - // Check again here if we are in a state to not process. - if nc.isClosed() { - nc.mu.Unlock() - return - } - if nc.isConnecting() || nc.isReconnecting() { - nc.mu.Unlock() - // Move to closed state. - nc.Close() - return - } - - subs := make([]*Subscription, 0, len(nc.subs)) - for _, s := range nc.subs { - if s == nc.respMux { - // Skip since might be in use while messages - // are being processed (can miss responses). - continue - } - subs = append(subs, s) - } - errCB := nc.Opts.AsyncErrorCB - drainWait := nc.Opts.DrainTimeout - respMux := nc.respMux - nc.mu.Unlock() - - // for pushing errors with context. - pushErr := func(err error) { - nc.mu.Lock() - nc.err = err - if errCB != nil { - nc.ach.push(func() { errCB(nc, nil, err) }) - } - nc.mu.Unlock() - } - - // Do subs first, skip request handler if present. - for _, s := range subs { - if err := s.Drain(); err != nil { - // We will notify about these but continue. 
- pushErr(err) - } - } - - // Wait for the subscriptions to drop to zero. - timeout := time.Now().Add(drainWait) - var min int - if respMux != nil { - min = 1 - } else { - min = 0 - } - for time.Now().Before(timeout) { - if nc.NumSubscriptions() == min { - break - } - time.Sleep(10 * time.Millisecond) - } - - // In case there was a request/response handler - // then need to call drain at the end. - if respMux != nil { - if err := respMux.Drain(); err != nil { - // We will notify about these but continue. - pushErr(err) - } - for time.Now().Before(timeout) { - if nc.NumSubscriptions() == 0 { - break - } - time.Sleep(10 * time.Millisecond) - } - } - - // Check if we timed out. - if nc.NumSubscriptions() != 0 { - pushErr(ErrDrainTimeout) - } - - // Flip State - nc.mu.Lock() - nc.changeConnStatus(DRAINING_PUBS) - nc.mu.Unlock() - - // Do publish drain via Flush() call. - err := nc.FlushTimeout(5 * time.Second) - if err != nil { - pushErr(err) - } - - // Move to closed state. - nc.Close() -} - -// Drain will put a connection into a drain state. All subscriptions will -// immediately be put into a drain state. Upon completion, the publishers -// will be drained and can not publish any additional messages. Upon draining -// of the publishers, the connection will be closed. Use the ClosedCB() -// option to know when the connection has moved from draining to closed. -// -// See note in Subscription.Drain for JetStream subscriptions. -func (nc *Conn) Drain() error { - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - if nc.isConnecting() || nc.isReconnecting() { - nc.mu.Unlock() - nc.Close() - return ErrConnectionReconnecting - } - if nc.isDraining() { - nc.mu.Unlock() - return nil - } - nc.changeConnStatus(DRAINING_SUBS) - go nc.drainConnection() - nc.mu.Unlock() - - return nil -} - -// IsDraining tests if a Conn is in the draining state. 
-func (nc *Conn) IsDraining() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isDraining() -} - -// caller must lock -func (nc *Conn) getServers(implicitOnly bool) []string { - poolSize := len(nc.srvPool) - var servers = make([]string, 0) - for i := 0; i < poolSize; i++ { - if implicitOnly && !nc.srvPool[i].isImplicit { - continue - } - url := nc.srvPool[i].url - servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) - } - return servers -} - -// Servers returns the list of known server urls, including additional -// servers discovered after a connection has been established. If -// authentication is enabled, use UserInfo or Token when connecting with -// these urls. -func (nc *Conn) Servers() []string { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.getServers(false) -} - -// DiscoveredServers returns only the server urls that have been discovered -// after a connection has been established. If authentication is enabled, -// use UserInfo or Token when connecting with these urls. -func (nc *Conn) DiscoveredServers() []string { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.getServers(true) -} - -// Status returns the current state of the connection. -func (nc *Conn) Status() Status { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.status -} - -// Test if Conn has been closed Lock is assumed held. -func (nc *Conn) isClosed() bool { - return nc.status == CLOSED -} - -// Test if Conn is in the process of connecting -func (nc *Conn) isConnecting() bool { - return nc.status == CONNECTING -} - -// Test if Conn is being reconnected. -func (nc *Conn) isReconnecting() bool { - return nc.status == RECONNECTING -} - -// Test if Conn is connected or connecting. -func (nc *Conn) isConnected() bool { - return nc.status == CONNECTED || nc.isDraining() -} - -// Test if Conn is in the draining state. 
-func (nc *Conn) isDraining() bool { - return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS -} - -// Test if Conn is in the draining state for pubs. -func (nc *Conn) isDrainingPubs() bool { - return nc.status == DRAINING_PUBS -} - -// Stats will return a race safe copy of the Statistics section for the connection. -func (nc *Conn) Stats() Statistics { - // Stats are updated either under connection's mu or with atomic operations - // for inbound stats in processMsg(). - nc.mu.Lock() - stats := Statistics{ - InMsgs: atomic.LoadUint64(&nc.InMsgs), - InBytes: atomic.LoadUint64(&nc.InBytes), - OutMsgs: nc.OutMsgs, - OutBytes: nc.OutBytes, - Reconnects: nc.Reconnects, - } - nc.mu.Unlock() - return stats -} - -// MaxPayload returns the size limit that a message payload can have. -// This is set by the server configuration and delivered to the client -// upon connect. -func (nc *Conn) MaxPayload() int64 { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.MaxPayload -} - -// HeadersSupported will return if the server supports headers -func (nc *Conn) HeadersSupported() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.Headers -} - -// AuthRequired will return if the connected server requires authorization. -func (nc *Conn) AuthRequired() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.AuthRequired -} - -// TLSRequired will return if the connected server requires TLS connections. -func (nc *Conn) TLSRequired() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.TLSRequired -} - -// Barrier schedules the given function `f` to all registered asynchronous -// subscriptions. -// Only the last subscription to see this barrier will invoke the function. -// If no subscription is registered at the time of this call, `f()` is invoked -// right away. -// ErrConnectionClosed is returned if the connection is closed prior to -// the call. 
-func (nc *Conn) Barrier(f func()) error { - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - nc.subsMu.Lock() - // Need to figure out how many non chan subscriptions there are - numSubs := 0 - for _, sub := range nc.subs { - if sub.typ == AsyncSubscription { - numSubs++ - } - } - if numSubs == 0 { - nc.subsMu.Unlock() - nc.mu.Unlock() - f() - return nil - } - barrier := &barrierInfo{refs: int64(numSubs), f: f} - for _, sub := range nc.subs { - sub.mu.Lock() - if sub.mch == nil { - msg := &Msg{barrier: barrier} - // Push onto the async pList - if sub.pTail != nil { - sub.pTail.next = msg - } else { - sub.pHead = msg - sub.pCond.Signal() - } - sub.pTail = msg - } - sub.mu.Unlock() - } - nc.subsMu.Unlock() - nc.mu.Unlock() - return nil -} - -// GetClientIP returns the client IP as known by the server. -// Supported as of server version 2.1.6. -func (nc *Conn) GetClientIP() (net.IP, error) { - nc.mu.RLock() - defer nc.mu.RUnlock() - if nc.isClosed() { - return nil, ErrConnectionClosed - } - if nc.info.ClientIP == "" { - return nil, ErrClientIPNotSupported - } - ip := net.ParseIP(nc.info.ClientIP) - return ip, nil -} - -// GetClientID returns the client ID assigned by the server to which -// the client is currently connected to. Note that the value may change if -// the client reconnects. -// This function returns ErrClientIDNotSupported if the server is of a -// version prior to 1.2.0. -func (nc *Conn) GetClientID() (uint64, error) { - nc.mu.RLock() - defer nc.mu.RUnlock() - if nc.isClosed() { - return 0, ErrConnectionClosed - } - if nc.info.CID == 0 { - return 0, ErrClientIDNotSupported - } - return nc.info.CID, nil -} - -// StatusChanged returns a channel on which given list of connection status changes will be reported. -// If no statuses are provided, defaults will be used: CONNECTED, RECONNECTING, DISCONNECTED, CLOSED. 
-func (nc *Conn) StatusChanged(statuses ...Status) chan Status { - if len(statuses) == 0 { - statuses = []Status{CONNECTED, RECONNECTING, DISCONNECTED, CLOSED} - } - ch := make(chan Status, 10) - for _, s := range statuses { - nc.registerStatusChangeListener(s, ch) - } - return ch -} - -// registerStatusChangeListener registers a channel waiting for a specific status change event. -// Status change events are non-blocking - if no receiver is waiting for the status change, -// it will not be sent on the channel. Closed channels are ignored. -func (nc *Conn) registerStatusChangeListener(status Status, ch chan Status) { - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.statListeners == nil { - nc.statListeners = make(map[Status][]chan Status) - } - if _, ok := nc.statListeners[status]; !ok { - nc.statListeners[status] = make([]chan Status, 0) - } - nc.statListeners[status] = append(nc.statListeners[status], ch) -} - -// sendStatusEvent sends connection status event to all channels. -// If channel is closed, or there is no listener, sendStatusEvent -// will not block. Lock should be held entering. -func (nc *Conn) sendStatusEvent(s Status) { -Loop: - for i := 0; i < len(nc.statListeners[s]); i++ { - // make sure channel is not closed - select { - case <-nc.statListeners[s][i]: - // if chan is closed, remove it - nc.statListeners[s][i] = nc.statListeners[s][len(nc.statListeners[s])-1] - nc.statListeners[s] = nc.statListeners[s][:len(nc.statListeners[s])-1] - i-- - continue Loop - default: - } - // only send event if someone's listening - select { - case nc.statListeners[s][i] <- s: - default: - } - } -} - -// changeConnStatus changes connections status and sends events -// to all listeners. Lock should be held entering. -func (nc *Conn) changeConnStatus(status Status) { - if nc == nil { - return - } - nc.sendStatusEvent(status) - nc.status = status -} - -// NkeyOptionFromSeed will load an nkey pair from a seed file. 
-// It will return the NKey Option and will handle -// signing of nonce challenges from the server. It will take -// care to not hold keys in memory and to wipe memory. -func NkeyOptionFromSeed(seedFile string) (Option, error) { - kp, err := nkeyPairFromSeedFile(seedFile) - if err != nil { - return nil, err - } - // Wipe our key on exit. - defer kp.Wipe() - - pub, err := kp.PublicKey() - if err != nil { - return nil, err - } - if !nkeys.IsValidPublicUserKey(pub) { - return nil, fmt.Errorf("nats: Not a valid nkey user seed") - } - sigCB := func(nonce []byte) ([]byte, error) { - return sigHandler(nonce, seedFile) - } - return Nkey(string(pub), sigCB), nil -} - -// Just wipe slice with 'x', for clearing contents of creds or nkey seed file. -func wipeSlice(buf []byte) { - for i := range buf { - buf[i] = 'x' - } -} - -func userFromFile(userFile string) (string, error) { - path, err := expandPath(userFile) - if err != nil { - return _EMPTY_, fmt.Errorf("nats: %w", err) - } - - contents, err := os.ReadFile(path) - if err != nil { - return _EMPTY_, fmt.Errorf("nats: %w", err) - } - defer wipeSlice(contents) - return nkeys.ParseDecoratedJWT(contents) -} - -func homeDir() (string, error) { - if runtime.GOOS == "windows" { - homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") - userProfile := os.Getenv("USERPROFILE") - - var home string - if homeDrive == "" || homePath == "" { - if userProfile == "" { - return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%") - } - home = userProfile - } else { - home = filepath.Join(homeDrive, homePath) - } - - return home, nil - } - - home := os.Getenv("HOME") - if home == "" { - return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME") - } - return home, nil -} - -func expandPath(p string) (string, error) { - p = os.ExpandEnv(p) - - if !strings.HasPrefix(p, "~") { - return p, nil - } - - home, err := homeDir() - if err != nil { - return _EMPTY_, err - } 
- - return filepath.Join(home, p[1:]), nil -} - -func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) { - contents, err := os.ReadFile(seedFile) - if err != nil { - return nil, fmt.Errorf("nats: %w", err) - } - defer wipeSlice(contents) - return nkeys.ParseDecoratedNKey(contents) -} - -// Sign authentication challenges from the server. -// Do not keep private seed in memory. -func sigHandler(nonce []byte, seedFile string) ([]byte, error) { - kp, err := nkeyPairFromSeedFile(seedFile) - if err != nil { - return nil, fmt.Errorf("unable to extract key pair from file %q: %w", seedFile, err) - } - // Wipe our key on exit. - defer kp.Wipe() - - sig, _ := kp.Sign(nonce) - return sig, nil -} - -type timeoutWriter struct { - timeout time.Duration - conn net.Conn - err error -} - -// Write implements the io.Writer interface. -func (tw *timeoutWriter) Write(p []byte) (int, error) { - if tw.err != nil { - return 0, tw.err - } - - var n int - tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout)) - n, tw.err = tw.conn.Write(p) - tw.conn.SetWriteDeadline(time.Time{}) - return n, tw.err -} diff --git a/vendor/github.com/nats-io/nats.go/netchan.go b/vendor/github.com/nats-io/nats.go/netchan.go deleted file mode 100644 index 060721eb..00000000 --- a/vendor/github.com/nats-io/nats.go/netchan.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2013-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nats - -import ( - "errors" - "reflect" -) - -// This allows the functionality for network channels by binding send and receive Go chans -// to subjects and optionally queue groups. -// Data will be encoded and decoded via the EncodedConn and its associated encoders. - -// BindSendChan binds a channel for send operations to NATS. -func (c *EncodedConn) BindSendChan(subject string, channel any) error { - chVal := reflect.ValueOf(channel) - if chVal.Kind() != reflect.Chan { - return ErrChanArg - } - go chPublish(c, chVal, subject) - return nil -} - -// Publish all values that arrive on the channel until it is closed or we -// encounter an error. -func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { - for { - val, ok := chVal.Recv() - if !ok { - // Channel has most likely been closed. - return - } - if e := c.Publish(subject, val.Interface()); e != nil { - // Do this under lock. - c.Conn.mu.Lock() - defer c.Conn.mu.Unlock() - - if c.Conn.Opts.AsyncErrorCB != nil { - // FIXME(dlc) - Not sure this is the right thing to do. - // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback - if c.Conn.isClosed() { - go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) - } else { - c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) }) - } - } - return - } - } -} - -// BindRecvChan binds a channel for receive operations from NATS. -func (c *EncodedConn) BindRecvChan(subject string, channel any) (*Subscription, error) { - return c.bindRecvChan(subject, _EMPTY_, channel) -} - -// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. -func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel any) (*Subscription, error) { - return c.bindRecvChan(subject, queue, channel) -} - -// Internal function to bind receive operations for a channel. 
-func (c *EncodedConn) bindRecvChan(subject, queue string, channel any) (*Subscription, error) { - chVal := reflect.ValueOf(channel) - if chVal.Kind() != reflect.Chan { - return nil, ErrChanArg - } - argType := chVal.Type().Elem() - - cb := func(m *Msg) { - var oPtr reflect.Value - if argType.Kind() != reflect.Ptr { - oPtr = reflect.New(argType) - } else { - oPtr = reflect.New(argType.Elem()) - } - if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { - c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) - if c.Conn.Opts.AsyncErrorCB != nil { - c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) }) - } - return - } - if argType.Kind() != reflect.Ptr { - oPtr = reflect.Indirect(oPtr) - } - // This is a bit hacky, but in this instance we may be trying to send to a closed channel. - // and the user does not know when it is safe to close the channel. - defer func() { - // If we have panicked, recover and close the subscription. - if r := recover(); r != nil { - m.Sub.Unsubscribe() - } - }() - // Actually do the send to the channel. - chVal.Send(oPtr) - } - - return c.Conn.subscribe(subject, queue, cb, nil, false, nil) -} diff --git a/vendor/github.com/nats-io/nats.go/object.go b/vendor/github.com/nats-io/nats.go/object.go deleted file mode 100644 index f6ba8fb1..00000000 --- a/vendor/github.com/nats-io/nats.go/object.go +++ /dev/null @@ -1,1386 +0,0 @@ -// Copyright 2021-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "hash" - "io" - "net" - "os" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go/internal/parser" - "github.com/nats-io/nuid" -) - -// ObjectStoreManager creates, loads and deletes Object Stores -type ObjectStoreManager interface { - // ObjectStore will look up and bind to an existing object store instance. - ObjectStore(bucket string) (ObjectStore, error) - // CreateObjectStore will create an object store. - CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) - // DeleteObjectStore will delete the underlying stream for the named object. - DeleteObjectStore(bucket string) error - // ObjectStoreNames is used to retrieve a list of bucket names - ObjectStoreNames(opts ...ObjectOpt) <-chan string - // ObjectStores is used to retrieve a list of bucket statuses - ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus -} - -// ObjectStore is a blob store capable of storing large objects efficiently in -// JetStream streams -type ObjectStore interface { - // Put will place the contents from the reader into a new object. - Put(obj *ObjectMeta, reader io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) - // Get will pull the named object from the object store. - Get(name string, opts ...GetObjectOpt) (ObjectResult, error) - - // PutBytes is convenience function to put a byte slice into this object store. - PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) - // GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. - GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) - - // PutString is convenience function to put a string into this object store. 
- PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) - // GetString is a convenience function to pull an object from this object store and return it as a string. - GetString(name string, opts ...GetObjectOpt) (string, error) - - // PutFile is convenience function to put a file into this object store. - PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) - // GetFile is a convenience function to pull an object from this object store and place it in a file. - GetFile(name, file string, opts ...GetObjectOpt) error - - // GetInfo will retrieve the current information for the object. - GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) - // UpdateMeta will update the metadata for the object. - UpdateMeta(name string, meta *ObjectMeta) error - - // Delete will delete the named object. - Delete(name string) error - - // AddLink will add a link to another object. - AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) - - // AddBucketLink will add a link to another object store. - AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) - - // Seal will seal the object store, no further modifications will be allowed. - Seal() error - - // Watch for changes in the underlying store and receive meta information updates. - Watch(opts ...WatchOpt) (ObjectWatcher, error) - - // List will list all the objects in this store. - List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) - - // Status retrieves run-time status about the backing store of the bucket. - Status() (ObjectStoreStatus, error) -} - -type ObjectOpt interface { - configureObject(opts *objOpts) error -} - -type objOpts struct { - ctx context.Context -} - -// For nats.Context() support. -func (ctx ContextOpt) configureObject(opts *objOpts) error { - opts.ctx = ctx - return nil -} - -// ObjectWatcher is what is returned when doing a watch. -type ObjectWatcher interface { - // Updates returns a channel to read any updates to entries. 
- Updates() <-chan *ObjectInfo - // Stop will stop this watcher. - Stop() error -} - -var ( - ErrObjectConfigRequired = errors.New("nats: object-store config required") - ErrBadObjectMeta = errors.New("nats: object-store meta information invalid") - ErrObjectNotFound = errors.New("nats: object not found") - ErrInvalidStoreName = errors.New("nats: invalid object-store name") - ErrDigestMismatch = errors.New("nats: received a corrupt object, digests do not match") - ErrInvalidDigestFormat = errors.New("nats: object digest hash has invalid format") - ErrNoObjectsFound = errors.New("nats: no objects found") - ErrObjectAlreadyExists = errors.New("nats: an object already exists with that name") - ErrNameRequired = errors.New("nats: name is required") - ErrNeeds262 = errors.New("nats: object-store requires at least server version 2.6.2") - ErrLinkNotAllowed = errors.New("nats: link cannot be set when putting the object in bucket") - ErrObjectRequired = errors.New("nats: object required") - ErrNoLinkToDeleted = errors.New("nats: not allowed to link to a deleted object") - ErrNoLinkToLink = errors.New("nats: not allowed to link to another link") - ErrCantGetBucket = errors.New("nats: invalid Get, object is a link to a bucket") - ErrBucketRequired = errors.New("nats: bucket required") - ErrBucketMalformed = errors.New("nats: bucket malformed") - ErrUpdateMetaDeleted = errors.New("nats: cannot update meta for a deleted object") -) - -// ObjectStoreConfig is the config for the object store. 
-type ObjectStoreConfig struct { - Bucket string `json:"bucket"` - Description string `json:"description,omitempty"` - TTL time.Duration `json:"max_age,omitempty"` - MaxBytes int64 `json:"max_bytes,omitempty"` - Storage StorageType `json:"storage,omitempty"` - Replicas int `json:"num_replicas,omitempty"` - Placement *Placement `json:"placement,omitempty"` - - // Bucket-specific metadata - // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` -} - -type ObjectStoreStatus interface { - // Bucket is the name of the bucket - Bucket() string - // Description is the description supplied when creating the bucket - Description() string - // TTL indicates how long objects are kept in the bucket - TTL() time.Duration - // Storage indicates the underlying JetStream storage technology used to store data - Storage() StorageType - // Replicas indicates how many storage replicas are kept for the data in the bucket - Replicas() int - // Sealed indicates the stream is sealed and cannot be modified in any way - Sealed() bool - // Size is the combined size of all data in the bucket including metadata, in bytes - Size() uint64 - // BackingStore provides details about the underlying storage - BackingStore() string - // Metadata is the user supplied metadata for the bucket - Metadata() map[string]string -} - -// ObjectMetaOptions -type ObjectMetaOptions struct { - Link *ObjectLink `json:"link,omitempty"` - ChunkSize uint32 `json:"max_chunk_size,omitempty"` -} - -// ObjectMeta is high level information about an object. -type ObjectMeta struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - Headers Header `json:"headers,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` - - // Optional options. - Opts *ObjectMetaOptions `json:"options,omitempty"` -} - -// ObjectInfo is meta plus instance information. 
-type ObjectInfo struct { - ObjectMeta - Bucket string `json:"bucket"` - NUID string `json:"nuid"` - Size uint64 `json:"size"` - ModTime time.Time `json:"mtime"` - Chunks uint32 `json:"chunks"` - Digest string `json:"digest,omitempty"` - Deleted bool `json:"deleted,omitempty"` -} - -// ObjectLink is used to embed links to other buckets and objects. -type ObjectLink struct { - // Bucket is the name of the other object store. - Bucket string `json:"bucket"` - // Name can be used to link to a single object. - // If empty means this is a link to the whole store, like a directory. - Name string `json:"name,omitempty"` -} - -// ObjectResult will return the underlying stream info and also be an io.ReadCloser. -type ObjectResult interface { - io.ReadCloser - Info() (*ObjectInfo, error) - Error() error -} - -const ( - objNameTmpl = "OBJ_%s" // OBJ_ // stream name - objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject - objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject - objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject - objMetaPreTmpl = "$O.%s.M.%s" // $O..M. // meta message subject - objNoPending = "0" - objDefaultChunkSize = uint32(128 * 1024) // 128k - objDigestType = "SHA-256=" - objDigestTmpl = objDigestType + "%s" -) - -type obs struct { - name string - stream string - js *js -} - -// CreateObjectStore will create an object store. -func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) { - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, ErrNeeds262 - } - if cfg == nil { - return nil, ErrObjectConfigRequired - } - if !validBucketRe.MatchString(cfg.Bucket) { - return nil, ErrInvalidStoreName - } - - name := cfg.Bucket - chunks := fmt.Sprintf(objAllChunksPreTmpl, name) - meta := fmt.Sprintf(objAllMetaPreTmpl, name) - - // We will set explicitly some values so that we can do comparison - // if we get an "already in use" error and need to check if it is same. 
- // See kv - replicas := cfg.Replicas - if replicas == 0 { - replicas = 1 - } - maxBytes := cfg.MaxBytes - if maxBytes == 0 { - maxBytes = -1 - } - - scfg := &StreamConfig{ - Name: fmt.Sprintf(objNameTmpl, name), - Description: cfg.Description, - Subjects: []string{chunks, meta}, - MaxAge: cfg.TTL, - MaxBytes: maxBytes, - Storage: cfg.Storage, - Replicas: replicas, - Placement: cfg.Placement, - Discard: DiscardNew, - AllowRollup: true, - AllowDirect: true, - Metadata: cfg.Metadata, - } - - // Create our stream. - _, err := js.AddStream(scfg) - if err != nil { - return nil, err - } - - return &obs{name: name, stream: scfg.Name, js: js}, nil -} - -// ObjectStore will look up and bind to an existing object store instance. -func (js *js) ObjectStore(bucket string) (ObjectStore, error) { - if !validBucketRe.MatchString(bucket) { - return nil, ErrInvalidStoreName - } - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, ErrNeeds262 - } - - stream := fmt.Sprintf(objNameTmpl, bucket) - si, err := js.StreamInfo(stream) - if err != nil { - return nil, err - } - return &obs{name: bucket, stream: si.Config.Name, js: js}, nil -} - -// DeleteObjectStore will delete the underlying stream for the named object. -func (js *js) DeleteObjectStore(bucket string) error { - stream := fmt.Sprintf(objNameTmpl, bucket) - return js.DeleteStream(stream) -} - -func encodeName(name string) string { - return base64.URLEncoding.EncodeToString([]byte(name)) -} - -// Put will place the contents from the reader into this object-store. 
-func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) { - if meta == nil || meta.Name == "" { - return nil, ErrBadObjectMeta - } - - if meta.Opts == nil { - meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize} - } else if meta.Opts.Link != nil { - return nil, ErrLinkNotAllowed - } else if meta.Opts.ChunkSize == 0 { - meta.Opts.ChunkSize = objDefaultChunkSize - } - - var o objOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureObject(&o); err != nil { - return nil, err - } - } - } - ctx := o.ctx - - // Create the new nuid so chunks go on a new subject if the name is re-used - newnuid := nuid.Next() - - // These will be used in more than one place - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid) - - // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem - // Chunks on the old nuid can be cleaned up at the end - einfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name - if err != nil && err != ErrObjectNotFound { - return nil, err - } - - // For async error handling - var perr error - var mu sync.Mutex - setErr := func(err error) { - mu.Lock() - defer mu.Unlock() - perr = err - } - getErr := func() error { - mu.Lock() - defer mu.Unlock() - return perr - } - - // Create our own JS context to handle errors etc. 
- jetStream, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) })) - if err != nil { - return nil, err - } - - defer jetStream.(*js).cleanupReplySub() - - purgePartial := func() { - // wait until all pubs are complete or up to default timeout before attempting purge - select { - case <-jetStream.PublishAsyncComplete(): - case <-time.After(obs.js.opts.wait): - } - obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}) - } - - m, h := NewMsg(chunkSubj), sha256.New() - chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0) - - // set up the info object. The chunk upload sets the size and digest - info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: *meta} - - for r != nil { - if ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - err = ctx.Err() - } else { - err = ErrTimeout - } - default: - } - if err != nil { - purgePartial() - return nil, err - } - } - - // Actual read. - // TODO(dlc) - Deadline? - n, readErr := r.Read(chunk) - - // Handle all non EOF errors - if readErr != nil && readErr != io.EOF { - purgePartial() - return nil, readErr - } - - // Add chunk only if we received data - if n > 0 { - // Chunk processing. - m.Data = chunk[:n] - h.Write(m.Data) - - // Send msg itself. - if _, err := jetStream.PublishMsgAsync(m); err != nil { - purgePartial() - return nil, err - } - if err := getErr(); err != nil { - purgePartial() - return nil, err - } - // Update totals. - sent++ - total += uint64(n) - } - - // EOF Processing. - if readErr == io.EOF { - // Place meta info. 
- info.Size, info.Chunks = uint64(total), uint32(sent) - info.Digest = GetObjectDigestValue(h) - break - } - } - - // Prepare the meta message - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name)) - mm := NewMsg(metaSubj) - mm.Header.Set(MsgRollup, MsgRollupSubject) - mm.Data, err = json.Marshal(info) - if err != nil { - if r != nil { - purgePartial() - } - return nil, err - } - - // Publish the meta message. - _, err = jetStream.PublishMsgAsync(mm) - if err != nil { - if r != nil { - purgePartial() - } - return nil, err - } - - // Wait for all to be processed. - select { - case <-jetStream.PublishAsyncComplete(): - if err := getErr(); err != nil { - if r != nil { - purgePartial() - } - return nil, err - } - case <-time.After(obs.js.opts.wait): - return nil, ErrTimeout - } - - info.ModTime = time.Now().UTC() // This time is not actually the correct time - - // Delete any original chunks. - if einfo != nil && !einfo.Deleted { - echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID) - obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: echunkSubj}) - } - - // TODO would it be okay to do this to return the info with the correct time? - // With the understanding that it is an extra call to the server. - // Otherwise the time the user gets back is the client time, not the server time. - // return obs.GetInfo(info.Name) - - return info, nil -} - -// GetObjectDigestValue calculates the base64 value of hashed data -func GetObjectDigestValue(data hash.Hash) string { - sha := data.Sum(nil) - return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:])) -} - -// DecodeObjectDigest decodes base64 hash -func DecodeObjectDigest(data string) ([]byte, error) { - digest := strings.SplitN(data, "=", 2) - if len(digest) != 2 { - return nil, ErrInvalidDigestFormat - } - return base64.URLEncoding.DecodeString(digest[1]) -} - -// ObjectResult impl. 
-type objResult struct { - sync.Mutex - info *ObjectInfo - r io.ReadCloser - err error - ctx context.Context - digest hash.Hash -} - -func (info *ObjectInfo) isLink() bool { - return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil -} - -type GetObjectOpt interface { - configureGetObject(opts *getObjectOpts) error -} -type getObjectOpts struct { - ctx context.Context - // Include deleted object in the result. - showDeleted bool -} - -type getObjectFn func(opts *getObjectOpts) error - -func (opt getObjectFn) configureGetObject(opts *getObjectOpts) error { - return opt(opts) -} - -// GetObjectShowDeleted makes Get() return object if it was marked as deleted. -func GetObjectShowDeleted() GetObjectOpt { - return getObjectFn(func(opts *getObjectOpts) error { - opts.showDeleted = true - return nil - }) -} - -// For nats.Context() support. -func (ctx ContextOpt) configureGetObject(opts *getObjectOpts) error { - opts.ctx = ctx - return nil -} - -// Get will pull the object from the underlying stream. -func (obs *obs) Get(name string, opts ...GetObjectOpt) (ObjectResult, error) { - var o getObjectOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureGetObject(&o); err != nil { - return nil, err - } - } - } - ctx := o.ctx - infoOpts := make([]GetObjectInfoOpt, 0) - if ctx != nil { - infoOpts = append(infoOpts, Context(ctx)) - } - if o.showDeleted { - infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) - } - - // Grab meta info. - info, err := obs.GetInfo(name, infoOpts...) - if err != nil { - return nil, err - } - if info.NUID == _EMPTY_ { - return nil, ErrBadObjectMeta - } - - // Check for object links. If single objects we do a pass through. - if info.isLink() { - if info.ObjectMeta.Opts.Link.Name == _EMPTY_ { - return nil, ErrCantGetBucket - } - - // is the link in the same bucket? 
- lbuck := info.ObjectMeta.Opts.Link.Bucket - if lbuck == obs.name { - return obs.Get(info.ObjectMeta.Opts.Link.Name) - } - - // different bucket - lobs, err := obs.js.ObjectStore(lbuck) - if err != nil { - return nil, err - } - return lobs.Get(info.ObjectMeta.Opts.Link.Name) - } - - result := &objResult{info: info, ctx: ctx} - if info.Size == 0 { - return result, nil - } - - pr, pw := net.Pipe() - result.r = pr - - gotErr := func(m *Msg, err error) { - pw.Close() - m.Sub.Unsubscribe() - result.setErr(err) - } - - // For calculating sum256 - result.digest = sha256.New() - - processChunk := func(m *Msg) { - var err error - if ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - err = ctx.Err() - } else { - err = ErrTimeout - } - default: - } - if err != nil { - gotErr(m, err) - return - } - } - - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - gotErr(m, err) - return - } - - // Write to our pipe. - for b := m.Data; len(b) > 0; { - n, err := pw.Write(b) - if err != nil { - gotErr(m, err) - return - } - b = b[n:] - } - // Update sha256 - result.digest.Write(m.Data) - - // Check if we are done. - if tokens[parser.AckNumPendingTokenPos] == objNoPending { - pw.Close() - m.Sub.Unsubscribe() - } - } - - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) - _, err = obs.js.Subscribe(chunkSubj, processChunk, OrderedConsumer()) - if err != nil { - return nil, err - } - - return result, nil -} - -// Delete will delete the object. -func (obs *obs) Delete(name string) error { - // Grab meta info. - info, err := obs.GetInfo(name, GetObjectInfoShowDeleted()) - if err != nil { - return err - } - if info.NUID == _EMPTY_ { - return ErrBadObjectMeta - } - - // Place a rollup delete marker and publish the info - info.Deleted = true - info.Size, info.Chunks, info.Digest = 0, 0, _EMPTY_ - - if err = publishMeta(info, obs.js); err != nil { - return err - } - - // Purge chunks for the object. 
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) - return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}) -} - -func publishMeta(info *ObjectInfo, js JetStreamContext) error { - // marshal the object into json, don't store an actual time - info.ModTime = time.Time{} - data, err := json.Marshal(info) - if err != nil { - return err - } - - // Prepare and publish the message. - mm := NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name))) - mm.Header.Set(MsgRollup, MsgRollupSubject) - mm.Data = data - if _, err := js.PublishMsg(mm); err != nil { - return err - } - - // set the ModTime in case it's returned to the user, even though it's not the correct time. - info.ModTime = time.Now().UTC() - return nil -} - -// AddLink will add a link to another object if it's not deleted and not another link -// name is the name of this link object -// obj is what is being linked too -func (obs *obs) AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) { - if name == "" { - return nil, ErrNameRequired - } - - // TODO Handle stale info - - if obj == nil || obj.Name == "" { - return nil, ErrObjectRequired - } - if obj.Deleted { - return nil, ErrNoLinkToDeleted - } - if obj.isLink() { - return nil, ErrNoLinkToLink - } - - // If object with link's name is found, error. - // If link with link's name is found, that's okay to overwrite. - // If there was an error that was not ErrObjectNotFound, error. 
- einfo, err := obs.GetInfo(name, GetObjectInfoShowDeleted()) - if einfo != nil { - if !einfo.isLink() { - return nil, ErrObjectAlreadyExists - } - } else if err != ErrObjectNotFound { - return nil, err - } - - // create the meta for the link - meta := &ObjectMeta{ - Name: name, - Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}}, - } - info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta} - - // put the link object - if err = publishMeta(info, obs.js); err != nil { - return nil, err - } - - return info, nil -} - -// AddBucketLink will add a link to another object store. -func (ob *obs) AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) { - if name == "" { - return nil, ErrNameRequired - } - if bucket == nil { - return nil, ErrBucketRequired - } - bos, ok := bucket.(*obs) - if !ok { - return nil, ErrBucketMalformed - } - - // If object with link's name is found, error. - // If link with link's name is found, that's okay to overwrite. - // If there was an error that was not ErrObjectNotFound, error. - einfo, err := ob.GetInfo(name, GetObjectInfoShowDeleted()) - if einfo != nil { - if !einfo.isLink() { - return nil, ErrObjectAlreadyExists - } - } else if err != ErrObjectNotFound { - return nil, err - } - - // create the meta for the link - meta := &ObjectMeta{ - Name: name, - Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}}, - } - info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta} - - // put the link object - err = publishMeta(info, ob.js) - if err != nil { - return nil, err - } - - return info, nil -} - -// PutBytes is convenience function to put a byte slice into this object store. -func (obs *obs) PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) { - return obs.Put(&ObjectMeta{Name: name}, bytes.NewReader(data), opts...) 
-} - -// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. -func (obs *obs) GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) { - result, err := obs.Get(name, opts...) - if err != nil { - return nil, err - } - defer result.Close() - - var b bytes.Buffer - if _, err := b.ReadFrom(result); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// PutString is convenience function to put a string into this object store. -func (obs *obs) PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) { - return obs.Put(&ObjectMeta{Name: name}, strings.NewReader(data), opts...) -} - -// GetString is a convenience function to pull an object from this object store and return it as a string. -func (obs *obs) GetString(name string, opts ...GetObjectOpt) (string, error) { - result, err := obs.Get(name, opts...) - if err != nil { - return _EMPTY_, err - } - defer result.Close() - - var b bytes.Buffer - if _, err := b.ReadFrom(result); err != nil { - return _EMPTY_, err - } - return b.String(), nil -} - -// PutFile is convenience function to put a file into an object store. -func (obs *obs) PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - return obs.Put(&ObjectMeta{Name: file}, f, opts...) -} - -// GetFile is a convenience function to pull and object and place in a file. -func (obs *obs) GetFile(name, file string, opts ...GetObjectOpt) error { - // Expect file to be new. - f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600) - if err != nil { - return err - } - defer f.Close() - - result, err := obs.Get(name, opts...) - if err != nil { - os.Remove(f.Name()) - return err - } - defer result.Close() - - // Stream copy to the file. 
- _, err = io.Copy(f, result) - return err -} - -type GetObjectInfoOpt interface { - configureGetInfo(opts *getObjectInfoOpts) error -} -type getObjectInfoOpts struct { - ctx context.Context - // Include deleted object in the result. - showDeleted bool -} - -type getObjectInfoFn func(opts *getObjectInfoOpts) error - -func (opt getObjectInfoFn) configureGetInfo(opts *getObjectInfoOpts) error { - return opt(opts) -} - -// GetObjectInfoShowDeleted makes GetInfo() return object if it was marked as deleted. -func GetObjectInfoShowDeleted() GetObjectInfoOpt { - return getObjectInfoFn(func(opts *getObjectInfoOpts) error { - opts.showDeleted = true - return nil - }) -} - -// For nats.Context() support. -func (ctx ContextOpt) configureGetInfo(opts *getObjectInfoOpts) error { - opts.ctx = ctx - return nil -} - -// GetInfo will retrieve the current information for the object. -func (obs *obs) GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) { - // Grab last meta value we have. - if name == "" { - return nil, ErrNameRequired - } - var o getObjectInfoOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureGetInfo(&o); err != nil { - return nil, err - } - } - } - - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call - stream := fmt.Sprintf(objNameTmpl, obs.name) - - m, err := obs.js.GetLastMsg(stream, metaSubj) - if err != nil { - if err == ErrMsgNotFound { - err = ErrObjectNotFound - } - return nil, err - } - var info ObjectInfo - if err := json.Unmarshal(m.Data, &info); err != nil { - return nil, ErrBadObjectMeta - } - if !o.showDeleted && info.Deleted { - return nil, ErrObjectNotFound - } - info.ModTime = m.Time - return &info, nil -} - -// UpdateMeta will update the meta for the object. -func (obs *obs) UpdateMeta(name string, meta *ObjectMeta) error { - if meta == nil { - return ErrBadObjectMeta - } - - // Grab the current meta. 
- info, err := obs.GetInfo(name) - if err != nil { - if errors.Is(err, ErrObjectNotFound) { - return ErrUpdateMetaDeleted - } - return err - } - - // If the new name is different from the old, and it exists, error - // If there was an error that was not ErrObjectNotFound, error. - if name != meta.Name { - existingInfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) - if err != nil && !errors.Is(err, ErrObjectNotFound) { - return err - } - if err == nil && !existingInfo.Deleted { - return ErrObjectAlreadyExists - } - } - - // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize) - // These should only be updated internally when appropriate. - info.Name = meta.Name - info.Description = meta.Description - info.Headers = meta.Headers - info.Metadata = meta.Metadata - - // Prepare the meta message - if err = publishMeta(info, obs.js); err != nil { - return err - } - - // did the name of this object change? We just stored the meta under the new name - // so delete the meta from the old name via purge stream for subject - if name != meta.Name { - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) - return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: metaSubj}) - } - - return nil -} - -// Seal will seal the object store, no further modifications will be allowed. -func (obs *obs) Seal() error { - stream := fmt.Sprintf(objNameTmpl, obs.name) - si, err := obs.js.StreamInfo(stream) - if err != nil { - return err - } - // Seal the stream from being able to take on more messages. - cfg := si.Config - cfg.Sealed = true - _, err = obs.js.UpdateStream(&cfg) - return err -} - -// Implementation for Watch -type objWatcher struct { - updates chan *ObjectInfo - sub *Subscription -} - -// Updates returns the interior channel. -func (w *objWatcher) Updates() <-chan *ObjectInfo { - if w == nil { - return nil - } - return w.updates -} - -// Stop will unsubscribe from the watcher. 
-func (w *objWatcher) Stop() error { - if w == nil { - return nil - } - return w.sub.Unsubscribe() -} - -// Watch for changes in the underlying store and receive meta information updates. -func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) { - var o watchOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureWatcher(&o); err != nil { - return nil, err - } - } - } - - var initDoneMarker bool - - w := &objWatcher{updates: make(chan *ObjectInfo, 32)} - - update := func(m *Msg) { - var info ObjectInfo - if err := json.Unmarshal(m.Data, &info); err != nil { - return // TODO(dlc) - Communicate this upwards? - } - meta, err := m.Metadata() - if err != nil { - return - } - - if !o.ignoreDeletes || !info.Deleted { - info.ModTime = meta.Timestamp - w.updates <- &info - } - - // if UpdatesOnly is set, no not send nil to the channel - // as it would always be triggered after initializing the watcher - if !initDoneMarker && meta.NumPending == 0 { - initDoneMarker = true - w.updates <- nil - } - } - - allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name) - _, err := obs.js.GetLastMsg(obs.stream, allMeta) - // if there are no messages on the stream and we are not watching - // updates only, send nil to the channel to indicate that the initial - // watch is done - if !o.updatesOnly { - if errors.Is(err, ErrMsgNotFound) { - initDoneMarker = true - w.updates <- nil - } - } else { - // if UpdatesOnly was used, mark initialization as complete - initDoneMarker = true - } - - // Used ordered consumer to deliver results. - subOpts := []SubOpt{OrderedConsumer()} - if !o.includeHistory { - subOpts = append(subOpts, DeliverLastPerSubject()) - } - if o.updatesOnly { - subOpts = append(subOpts, DeliverNew()) - } - sub, err := obs.js.Subscribe(allMeta, update, subOpts...) 
- if err != nil { - return nil, err - } - w.sub = sub - return w, nil -} - -type ListObjectsOpt interface { - configureListObjects(opts *listObjectOpts) error -} -type listObjectOpts struct { - ctx context.Context - // Include deleted objects in the result channel. - showDeleted bool -} - -type listObjectsFn func(opts *listObjectOpts) error - -func (opt listObjectsFn) configureListObjects(opts *listObjectOpts) error { - return opt(opts) -} - -// ListObjectsShowDeleted makes ListObjects() return deleted objects. -func ListObjectsShowDeleted() ListObjectsOpt { - return listObjectsFn(func(opts *listObjectOpts) error { - opts.showDeleted = true - return nil - }) -} - -// For nats.Context() support. -func (ctx ContextOpt) configureListObjects(opts *listObjectOpts) error { - opts.ctx = ctx - return nil -} - -// List will list all the objects in this store. -func (obs *obs) List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) { - var o listObjectOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureListObjects(&o); err != nil { - return nil, err - } - } - } - watchOpts := make([]WatchOpt, 0) - if !o.showDeleted { - watchOpts = append(watchOpts, IgnoreDeletes()) - } - watcher, err := obs.Watch(watchOpts...) 
- if err != nil { - return nil, err - } - defer watcher.Stop() - if o.ctx == nil { - o.ctx = context.Background() - } - - var objs []*ObjectInfo - updates := watcher.Updates() -Updates: - for { - select { - case entry := <-updates: - if entry == nil { - break Updates - } - objs = append(objs, entry) - case <-o.ctx.Done(): - return nil, o.ctx.Err() - } - } - if len(objs) == 0 { - return nil, ErrNoObjectsFound - } - return objs, nil -} - -// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus -type ObjectBucketStatus struct { - nfo *StreamInfo - bucket string -} - -// Bucket is the name of the bucket -func (s *ObjectBucketStatus) Bucket() string { return s.bucket } - -// Description is the description supplied when creating the bucket -func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } - -// TTL indicates how long objects are kept in the bucket -func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } - -// Storage indicates the underlying JetStream storage technology used to store data -func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage } - -// Replicas indicates how many storage replicas are kept for the data in the bucket -func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } - -// Sealed indicates the stream is sealed and cannot be modified in any way -func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed } - -// Size is the combined size of all data in the bucket including metadata, in bytes -func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } - -// BackingStore indicates what technology is used for storage of the bucket -func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } - -// Metadata is the metadata supplied when creating the bucket -func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata } - -// StreamInfo is the stream info 
retrieved to create the status -func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo } - -// Status retrieves run-time status about a bucket -func (obs *obs) Status() (ObjectStoreStatus, error) { - nfo, err := obs.js.StreamInfo(obs.stream) - if err != nil { - return nil, err - } - - status := &ObjectBucketStatus{ - nfo: nfo, - bucket: obs.name, - } - - return status, nil -} - -// Read impl. -func (o *objResult) Read(p []byte) (n int, err error) { - o.Lock() - defer o.Unlock() - if ctx := o.ctx; ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - o.err = ctx.Err() - } else { - o.err = ErrTimeout - } - default: - } - } - if o.err != nil { - return 0, o.err - } - if o.r == nil { - return 0, io.EOF - } - - r := o.r.(net.Conn) - r.SetReadDeadline(time.Now().Add(2 * time.Second)) - n, err = r.Read(p) - if err, ok := err.(net.Error); ok && err.Timeout() { - if ctx := o.ctx; ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - return 0, ctx.Err() - } else { - return 0, ErrTimeout - } - default: - err = nil - } - } - } - if err == io.EOF { - // Make sure the digest matches. - sha := o.digest.Sum(nil) - rsha, decodeErr := DecodeObjectDigest(o.info.Digest) - if decodeErr != nil { - o.err = decodeErr - return 0, o.err - } - if !bytes.Equal(sha[:], rsha) { - o.err = ErrDigestMismatch - return 0, o.err - } - } - return n, err -} - -// Close impl. 
-func (o *objResult) Close() error { - o.Lock() - defer o.Unlock() - if o.r == nil { - return nil - } - return o.r.Close() -} - -func (o *objResult) setErr(err error) { - o.Lock() - defer o.Unlock() - o.err = err -} - -func (o *objResult) Info() (*ObjectInfo, error) { - o.Lock() - defer o.Unlock() - return o.info, o.err -} - -func (o *objResult) Error() error { - o.Lock() - defer o.Unlock() - return o.err -} - -// ObjectStoreNames is used to retrieve a list of bucket names -func (js *js) ObjectStoreNames(opts ...ObjectOpt) <-chan string { - var o objOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureObject(&o); err != nil { - return nil - } - } - } - ch := make(chan string) - var cancel context.CancelFunc - if o.ctx == nil { - o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) - } - l := &streamLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") - l.js.opts.ctx = o.ctx - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - if !strings.HasPrefix(info.Config.Name, "OBJ_") { - continue - } - select { - case ch <- info.Config.Name: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// ObjectStores is used to retrieve a list of bucket statuses -func (js *js) ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus { - var o objOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureObject(&o); err != nil { - return nil - } - } - } - ch := make(chan ObjectStoreStatus) - var cancel context.CancelFunc - if o.ctx == nil { - o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) - } - l := &streamLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") - l.js.opts.ctx = o.ctx - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - if 
!strings.HasPrefix(info.Config.Name, "OBJ_") { - continue - } - select { - case ch <- &ObjectBucketStatus{ - nfo: info, - bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), - }: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} diff --git a/vendor/github.com/nats-io/nats.go/parser.go b/vendor/github.com/nats-io/nats.go/parser.go deleted file mode 100644 index 70204e60..00000000 --- a/vendor/github.com/nats-io/nats.go/parser.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "fmt" -) - -type msgArg struct { - subject []byte - reply []byte - sid int64 - hdr int - size int -} - -const MAX_CONTROL_LINE_SIZE = 4096 - -type parseState struct { - state int - as int - drop int - hdr int - ma msgArg - argBuf []byte - msgBuf []byte - msgCopied bool - scratch [MAX_CONTROL_LINE_SIZE]byte -} - -const ( - OP_START = iota - OP_PLUS - OP_PLUS_O - OP_PLUS_OK - OP_MINUS - OP_MINUS_E - OP_MINUS_ER - OP_MINUS_ERR - OP_MINUS_ERR_SPC - MINUS_ERR_ARG - OP_M - OP_MS - OP_MSG - OP_MSG_SPC - MSG_ARG - MSG_PAYLOAD - MSG_END - OP_H - OP_P - OP_PI - OP_PIN - OP_PING - OP_PO - OP_PON - OP_PONG - OP_I - OP_IN - OP_INF - OP_INFO - OP_INFO_SPC - INFO_ARG -) - -// parse is the fast protocol parser engine. 
-func (nc *Conn) parse(buf []byte) error { - var i int - var b byte - - // Move to loop instead of range syntax to allow jumping of i - for i = 0; i < len(buf); i++ { - b = buf[i] - - switch nc.ps.state { - case OP_START: - switch b { - case 'M', 'm': - nc.ps.state = OP_M - nc.ps.hdr = -1 - nc.ps.ma.hdr = -1 - case 'H', 'h': - nc.ps.state = OP_H - nc.ps.hdr = 0 - nc.ps.ma.hdr = 0 - case 'P', 'p': - nc.ps.state = OP_P - case '+': - nc.ps.state = OP_PLUS - case '-': - nc.ps.state = OP_MINUS - case 'I', 'i': - nc.ps.state = OP_I - default: - goto parseErr - } - case OP_H: - switch b { - case 'M', 'm': - nc.ps.state = OP_M - default: - goto parseErr - } - case OP_M: - switch b { - case 'S', 's': - nc.ps.state = OP_MS - default: - goto parseErr - } - case OP_MS: - switch b { - case 'G', 'g': - nc.ps.state = OP_MSG - default: - goto parseErr - } - case OP_MSG: - switch b { - case ' ', '\t': - nc.ps.state = OP_MSG_SPC - default: - goto parseErr - } - case OP_MSG_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = MSG_ARG - nc.ps.as = i - } - case MSG_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - if err := nc.processMsgArgs(arg); err != nil { - return err - } - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD - - // jump ahead with the index. If this overruns - // what is left we fall out and process a split buffer. - i = nc.ps.as + nc.ps.ma.size - 1 - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - case MSG_PAYLOAD: - if nc.ps.msgBuf != nil { - if len(nc.ps.msgBuf) >= nc.ps.ma.size { - nc.processMsg(nc.ps.msgBuf) - nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END - } else { - // copy as much as we can to the buffer and skip ahead. 
- toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) - avail := len(buf) - i - - if avail < toCopy { - toCopy = avail - } - - if toCopy > 0 { - start := len(nc.ps.msgBuf) - // This is needed for copy to work. - nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] - copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) - // Update our index - i = (i + toCopy) - 1 - } else { - nc.ps.msgBuf = append(nc.ps.msgBuf, b) - } - } - } else if i-nc.ps.as >= nc.ps.ma.size { - nc.processMsg(buf[nc.ps.as:i]) - nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END - } - case MSG_END: - switch b { - case '\n': - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - continue - } - case OP_PLUS: - switch b { - case 'O', 'o': - nc.ps.state = OP_PLUS_O - default: - goto parseErr - } - case OP_PLUS_O: - switch b { - case 'K', 'k': - nc.ps.state = OP_PLUS_OK - default: - goto parseErr - } - case OP_PLUS_OK: - switch b { - case '\n': - nc.processOK() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_MINUS: - switch b { - case 'E', 'e': - nc.ps.state = OP_MINUS_E - default: - goto parseErr - } - case OP_MINUS_E: - switch b { - case 'R', 'r': - nc.ps.state = OP_MINUS_ER - default: - goto parseErr - } - case OP_MINUS_ER: - switch b { - case 'R', 'r': - nc.ps.state = OP_MINUS_ERR - default: - goto parseErr - } - case OP_MINUS_ERR: - switch b { - case ' ', '\t': - nc.ps.state = OP_MINUS_ERR_SPC - default: - goto parseErr - } - case OP_MINUS_ERR_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = MINUS_ERR_ARG - nc.ps.as = i - } - case MINUS_ERR_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - nc.ps.argBuf = nil - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - nc.processErr(string(arg)) - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - case OP_P: - switch b { - case 'I', 'i': - 
nc.ps.state = OP_PI - case 'O', 'o': - nc.ps.state = OP_PO - default: - goto parseErr - } - case OP_PO: - switch b { - case 'N', 'n': - nc.ps.state = OP_PON - default: - goto parseErr - } - case OP_PON: - switch b { - case 'G', 'g': - nc.ps.state = OP_PONG - default: - goto parseErr - } - case OP_PONG: - switch b { - case '\n': - nc.processPong() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_PI: - switch b { - case 'N', 'n': - nc.ps.state = OP_PIN - default: - goto parseErr - } - case OP_PIN: - switch b { - case 'G', 'g': - nc.ps.state = OP_PING - default: - goto parseErr - } - case OP_PING: - switch b { - case '\n': - nc.processPing() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_I: - switch b { - case 'N', 'n': - nc.ps.state = OP_IN - default: - goto parseErr - } - case OP_IN: - switch b { - case 'F', 'f': - nc.ps.state = OP_INF - default: - goto parseErr - } - case OP_INF: - switch b { - case 'O', 'o': - nc.ps.state = OP_INFO - default: - goto parseErr - } - case OP_INFO: - switch b { - case ' ', '\t': - nc.ps.state = OP_INFO_SPC - default: - goto parseErr - } - case OP_INFO_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = INFO_ARG - nc.ps.as = i - } - case INFO_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - nc.ps.argBuf = nil - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - nc.processAsyncInfo(arg) - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - default: - goto parseErr - } - } - // Check for split buffer scenarios - if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { - nc.ps.argBuf = nc.ps.scratch[:0] - nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) 
- // FIXME, check max len - } - // Check for split msg - if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { - // We need to clone the msgArg if it is still referencing the - // read buffer and we are not able to process the msg. - if nc.ps.argBuf == nil { - nc.cloneMsgArg() - } - - // If we will overflow the scratch buffer, just create a - // new buffer to hold the split message. - if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { - lrem := len(buf[nc.ps.as:]) - - nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) - copy(nc.ps.msgBuf, buf[nc.ps.as:]) - nc.ps.msgCopied = true - } else { - nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] - nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) - } - } - - return nil - -parseErr: - return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) -} - -// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but -// we need to hold onto it into the next read. -func (nc *Conn) cloneMsgArg() { - nc.ps.argBuf = nc.ps.scratch[:0] - nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) - nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) - nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] - if nc.ps.ma.reply != nil { - nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] - } -} - -const argsLenMax = 4 - -func (nc *Conn) processMsgArgs(arg []byte) error { - // Use separate function for header based messages. 
- if nc.ps.hdr >= 0 { - return nc.processHeaderMsgArgs(arg) - } - - // Unroll splitArgs to avoid runtime/heap issues - a := [argsLenMax][]byte{} - args := a[:0] - start := -1 - for i, b := range arg { - switch b { - case ' ', '\t', '\r', '\n': - if start >= 0 { - args = append(args, arg[start:i]) - start = -1 - } - default: - if start < 0 { - start = i - } - } - } - if start >= 0 { - args = append(args, arg[start:]) - } - - switch len(args) { - case 3: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = nil - nc.ps.ma.size = int(parseInt64(args[2])) - case 4: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = args[2] - nc.ps.ma.size = int(parseInt64(args[3])) - default: - return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) - } - if nc.ps.ma.sid < 0 { - return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) - } - if nc.ps.ma.size < 0 { - return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) - } - return nil -} - -// processHeaderMsgArgs is for a header based message. 
-func (nc *Conn) processHeaderMsgArgs(arg []byte) error { - // Unroll splitArgs to avoid runtime/heap issues - a := [argsLenMax][]byte{} - args := a[:0] - start := -1 - for i, b := range arg { - switch b { - case ' ', '\t', '\r', '\n': - if start >= 0 { - args = append(args, arg[start:i]) - start = -1 - } - default: - if start < 0 { - start = i - } - } - } - if start >= 0 { - args = append(args, arg[start:]) - } - - switch len(args) { - case 4: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = nil - nc.ps.ma.hdr = int(parseInt64(args[2])) - nc.ps.ma.size = int(parseInt64(args[3])) - case 5: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = args[2] - nc.ps.ma.hdr = int(parseInt64(args[3])) - nc.ps.ma.size = int(parseInt64(args[4])) - default: - return fmt.Errorf("nats: processHeaderMsgArgs Parse Error: '%s'", arg) - } - if nc.ps.ma.sid < 0 { - return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Sid: '%s'", arg) - } - if nc.ps.ma.hdr < 0 || nc.ps.ma.hdr > nc.ps.ma.size { - return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Header Size: '%s'", arg) - } - if nc.ps.ma.size < 0 { - return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Size: '%s'", arg) - } - return nil -} - -// ASCII numbers 0-9 -const ( - ascii_0 = 48 - ascii_9 = 57 -) - -// parseInt64 expects decimal positive numbers. 
We -// return -1 to signal error -func parseInt64(d []byte) (n int64) { - if len(d) == 0 { - return -1 - } - for _, dec := range d { - if dec < ascii_0 || dec > ascii_9 { - return -1 - } - n = n*10 + (int64(dec) - ascii_0) - } - return n -} diff --git a/vendor/github.com/nats-io/nats.go/rand.go b/vendor/github.com/nats-io/nats.go/rand.go deleted file mode 100644 index 0cdee0ac..00000000 --- a/vendor/github.com/nats-io/nats.go/rand.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.20 -// +build !go1.20 - -// A Go client for the NATS messaging system (https://nats.io). -package nats - -import ( - "math/rand" - "time" -) - -func init() { - // This is not needed since Go 1.20 because now rand.Seed always happens - // by default (uses runtime.fastrand64 instead as source). - rand.Seed(time.Now().UnixNano()) -} diff --git a/vendor/github.com/nats-io/nats.go/timer.go b/vendor/github.com/nats-io/nats.go/timer.go deleted file mode 100644 index 4fb02ecb..00000000 --- a/vendor/github.com/nats-io/nats.go/timer.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "sync" - "time" -) - -// global pool of *time.Timer's. can be used by multiple goroutines concurrently. -var globalTimerPool timerPool - -// timerPool provides GC-able pooling of *time.Timer's. -// can be used by multiple goroutines concurrently. -type timerPool struct { - p sync.Pool -} - -// Get returns a timer that completes after the given duration. -func (tp *timerPool) Get(d time.Duration) *time.Timer { - if t, _ := tp.p.Get().(*time.Timer); t != nil { - t.Reset(d) - return t - } - - return time.NewTimer(d) -} - -// Put pools the given timer. -// -// There is no need to call t.Stop() before calling Put. -// -// Put will try to stop the timer before pooling. If the -// given timer already expired, Put will read the unreceived -// value if there is one. -func (tp *timerPool) Put(t *time.Timer) { - if !t.Stop() { - select { - case <-t.C: - default: - } - } - - tp.p.Put(t) -} diff --git a/vendor/github.com/nats-io/nats.go/util/tls.go b/vendor/github.com/nats-io/nats.go/util/tls.go deleted file mode 100644 index af9f51f0..00000000 --- a/vendor/github.com/nats-io/nats.go/util/tls.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.8 -// +build go1.8 - -package util - -import "crypto/tls" - -// CloneTLSConfig returns a copy of c. -func CloneTLSConfig(c *tls.Config) *tls.Config { - if c == nil { - return &tls.Config{} - } - - return c.Clone() -} diff --git a/vendor/github.com/nats-io/nats.go/util/tls_go17.go b/vendor/github.com/nats-io/nats.go/util/tls_go17.go deleted file mode 100644 index 44d46b42..00000000 --- a/vendor/github.com/nats-io/nats.go/util/tls_go17.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2016-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.7 && !go1.8 -// +build go1.7,!go1.8 - -package util - -import ( - "crypto/tls" -) - -// CloneTLSConfig returns a copy of c. Only the exported fields are copied. -// This is temporary, until this is provided by the language. 
-// https://go-review.googlesource.com/#/c/28075/ -func CloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/nats-io/nats.go/ws.go b/vendor/github.com/nats-io/nats.go/ws.go deleted file mode 100644 index 2c2d421a..00000000 --- a/vendor/github.com/nats-io/nats.go/ws.go +++ /dev/null @@ -1,780 +0,0 @@ -// Copyright 2021-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nats - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "io" - mrand "math/rand" - "net/http" - "net/url" - "strings" - "time" - "unicode/utf8" - - "github.com/klauspost/compress/flate" -) - -type wsOpCode int - -const ( - // From https://tools.ietf.org/html/rfc6455#section-5.2 - wsTextMessage = wsOpCode(1) - wsBinaryMessage = wsOpCode(2) - wsCloseMessage = wsOpCode(8) - wsPingMessage = wsOpCode(9) - wsPongMessage = wsOpCode(10) - - wsFinalBit = 1 << 7 - wsRsv1Bit = 1 << 6 // Used for compression, from https://tools.ietf.org/html/rfc7692#section-6 - wsRsv2Bit = 1 << 5 - wsRsv3Bit = 1 << 4 - - wsMaskBit = 1 << 7 - - wsContinuationFrame = 0 - wsMaxFrameHeaderSize = 14 - wsMaxControlPayloadSize = 125 - wsCloseSatusSize = 2 - - // From https://tools.ietf.org/html/rfc6455#section-11.7 - wsCloseStatusNormalClosure = 1000 - wsCloseStatusNoStatusReceived = 1005 - wsCloseStatusAbnormalClosure = 1006 - wsCloseStatusInvalidPayloadData = 1007 - - wsScheme = "ws" - wsSchemeTLS = "wss" - - wsPMCExtension = "permessage-deflate" // per-message compression - wsPMCSrvNoCtx = "server_no_context_takeover" - wsPMCCliNoCtx = "client_no_context_takeover" - wsPMCReqHeaderValue = wsPMCExtension + "; " + wsPMCSrvNoCtx + "; " + wsPMCCliNoCtx -) - -// From https://tools.ietf.org/html/rfc6455#section-1.3 -var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} - -type websocketReader struct { - r io.Reader - pending [][]byte - ib []byte - ff bool - fc bool - nl bool - dc *wsDecompressor - nc *Conn -} - -type wsDecompressor struct { - flate io.ReadCloser - bufs [][]byte - off int -} - -type websocketWriter struct { - w io.Writer - compress bool - compressor *flate.Writer - ctrlFrames [][]byte // pending frames that should be sent at the next Write() - cm []byte // close message that needs to be sent when everything else 
has been sent - cmDone bool // a close message has been added or sent (never going back to false) - noMoreSend bool // if true, even if there is a Write() call, we should not send anything -} - -func (d *wsDecompressor) Read(dst []byte) (int, error) { - if len(dst) == 0 { - return 0, nil - } - if len(d.bufs) == 0 { - return 0, io.EOF - } - copied := 0 - rem := len(dst) - for buf := d.bufs[0]; buf != nil && rem > 0; { - n := len(buf[d.off:]) - if n > rem { - n = rem - } - copy(dst[copied:], buf[d.off:d.off+n]) - copied += n - rem -= n - d.off += n - buf = d.nextBuf() - } - return copied, nil -} - -func (d *wsDecompressor) nextBuf() []byte { - // We still have remaining data in the first buffer - if d.off != len(d.bufs[0]) { - return d.bufs[0] - } - // We read the full first buffer. Reset offset. - d.off = 0 - // We were at the last buffer, so we are done. - if len(d.bufs) == 1 { - d.bufs = nil - return nil - } - // Here we move to the next buffer. - d.bufs = d.bufs[1:] - return d.bufs[0] -} - -func (d *wsDecompressor) ReadByte() (byte, error) { - if len(d.bufs) == 0 { - return 0, io.EOF - } - b := d.bufs[0][d.off] - d.off++ - d.nextBuf() - return b, nil -} - -func (d *wsDecompressor) addBuf(b []byte) { - d.bufs = append(d.bufs, b) -} - -func (d *wsDecompressor) decompress() ([]byte, error) { - d.off = 0 - // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 - // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader - // does not report unexpected EOF. - d.bufs = append(d.bufs, compressFinalBlock) - // Create or reset the decompressor with his object (wsDecompressor) - // that provides Read() and ReadByte() APIs that will consume from - // the compressed buffers (d.bufs). 
- if d.flate == nil { - d.flate = flate.NewReader(d) - } else { - d.flate.(flate.Resetter).Reset(d, nil) - } - b, err := io.ReadAll(d.flate) - // Now reset the compressed buffers list - d.bufs = nil - return b, err -} - -func wsNewReader(r io.Reader) *websocketReader { - return &websocketReader{r: r, ff: true} -} - -// From now on, reads will be from the readLoop and we will need to -// acquire the connection lock should we have to send/write a control -// message from handleControlFrame. -// -// Note: this runs under the connection lock. -func (r *websocketReader) doneWithConnect() { - r.nl = true -} - -func (r *websocketReader) Read(p []byte) (int, error) { - var err error - var buf []byte - - if l := len(r.ib); l > 0 { - buf = r.ib - r.ib = nil - } else { - if len(r.pending) > 0 { - return r.drainPending(p), nil - } - - // Get some data from the underlying reader. - n, err := r.r.Read(p) - if err != nil { - return 0, err - } - buf = p[:n] - } - - // Now parse this and decode frames. We will possibly read more to - // ensure that we get a full frame. 
- var ( - tmpBuf []byte - pos int - max = len(buf) - rem = 0 - ) - for pos < max { - b0 := buf[pos] - frameType := wsOpCode(b0 & 0xF) - final := b0&wsFinalBit != 0 - compressed := b0&wsRsv1Bit != 0 - pos++ - - tmpBuf, pos, err = wsGet(r.r, buf, pos, 1) - if err != nil { - return 0, err - } - b1 := tmpBuf[0] - - // Store size in case it is < 125 - rem = int(b1 & 0x7F) - - switch frameType { - case wsPingMessage, wsPongMessage, wsCloseMessage: - if rem > wsMaxControlPayloadSize { - return 0, fmt.Errorf( - fmt.Sprintf("control frame length bigger than maximum allowed of %v bytes", - wsMaxControlPayloadSize)) - } - if compressed { - return 0, errors.New("control frame should not be compressed") - } - if !final { - return 0, errors.New("control frame does not have final bit set") - } - case wsTextMessage, wsBinaryMessage: - if !r.ff { - return 0, errors.New("new message started before final frame for previous message was received") - } - r.ff = final - r.fc = compressed - case wsContinuationFrame: - // Compressed bit must be only set in the first frame - if r.ff || compressed { - return 0, errors.New("invalid continuation frame") - } - r.ff = final - default: - return 0, fmt.Errorf("unknown opcode %v", frameType) - } - - // If the encoded size is <= 125, then `rem` is simply the remainder size of the - // frame. If it is 126, then the actual size is encoded as a uint16. For larger - // frames, `rem` will initially be 127 and the actual size is encoded as a uint64. - switch rem { - case 126: - tmpBuf, pos, err = wsGet(r.r, buf, pos, 2) - if err != nil { - return 0, err - } - rem = int(binary.BigEndian.Uint16(tmpBuf)) - case 127: - tmpBuf, pos, err = wsGet(r.r, buf, pos, 8) - if err != nil { - return 0, err - } - rem = int(binary.BigEndian.Uint64(tmpBuf)) - } - - // Handle control messages in place... 
- if wsIsControlFrame(frameType) { - pos, err = r.handleControlFrame(frameType, buf, pos, rem) - if err != nil { - return 0, err - } - rem = 0 - continue - } - - var b []byte - // This ensures that we get the full payload for this frame. - b, pos, err = wsGet(r.r, buf, pos, rem) - if err != nil { - return 0, err - } - // We read the full frame. - rem = 0 - addToPending := true - if r.fc { - // Don't add to pending if we are not dealing with the final frame. - addToPending = r.ff - // Add the compressed payload buffer to the list. - r.addCBuf(b) - // Decompress only when this is the final frame. - if r.ff { - b, err = r.dc.decompress() - if err != nil { - return 0, err - } - r.fc = false - } - } - // Add to the pending list if dealing with uncompressed frames or - // after we have received the full compressed message and decompressed it. - if addToPending { - r.pending = append(r.pending, b) - } - } - // In case of compression, there may be nothing to drain - if len(r.pending) > 0 { - return r.drainPending(p), nil - } - return 0, nil -} - -func (r *websocketReader) addCBuf(b []byte) { - if r.dc == nil { - r.dc = &wsDecompressor{} - } - // Add a copy of the incoming buffer to the list of compressed buffers. - r.dc.addBuf(append([]byte(nil), b...)) -} - -func (r *websocketReader) drainPending(p []byte) int { - var n int - var max = len(p) - - for i, buf := range r.pending { - if n+len(buf) <= max { - copy(p[n:], buf) - n += len(buf) - } else { - // Is there room left? - if n < max { - // Write the partial and update this slice. - rem := max - n - copy(p[n:], buf[:rem]) - n += rem - r.pending[i] = buf[rem:] - } - // These are the remaining slices that will need to be used at - // the next Read() call. 
- r.pending = r.pending[i:] - return n - } - } - r.pending = r.pending[:0] - return n -} - -func wsGet(r io.Reader, buf []byte, pos, needed int) ([]byte, int, error) { - avail := len(buf) - pos - if avail >= needed { - return buf[pos : pos+needed], pos + needed, nil - } - b := make([]byte, needed) - start := copy(b, buf[pos:]) - for start != needed { - n, err := r.Read(b[start:cap(b)]) - start += n - if err != nil { - return b, start, err - } - } - return b, pos + avail, nil -} - -func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos, rem int) (int, error) { - var payload []byte - var err error - - if rem > 0 { - payload, pos, err = wsGet(r.r, buf, pos, rem) - if err != nil { - return pos, err - } - } - switch frameType { - case wsCloseMessage: - status := wsCloseStatusNoStatusReceived - var body string - lp := len(payload) - // If there is a payload, the status is represented as a 2-byte - // unsigned integer (in network byte order). Then, there may be an - // optional body. - hasStatus, hasBody := lp >= wsCloseSatusSize, lp > wsCloseSatusSize - if hasStatus { - // Decode the status - status = int(binary.BigEndian.Uint16(payload[:wsCloseSatusSize])) - // Now if there is a body, capture it and make sure this is a valid UTF-8. - if hasBody { - body = string(payload[wsCloseSatusSize:]) - if !utf8.ValidString(body) { - // https://tools.ietf.org/html/rfc6455#section-5.5.1 - // If body is present, it must be a valid utf8 - status = wsCloseStatusInvalidPayloadData - body = "invalid utf8 body in close frame" - } - } - } - r.nc.wsEnqueueCloseMsg(r.nl, status, body) - // Return io.EOF so that readLoop will close the connection as client closed - // after processing pending buffers. - return pos, io.EOF - case wsPingMessage: - r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload) - case wsPongMessage: - // Nothing to do.. 
- } - return pos, nil -} - -func (w *websocketWriter) Write(p []byte) (int, error) { - if w.noMoreSend { - return 0, nil - } - var total int - var n int - var err error - // If there are control frames, they can be sent now. Actually spec says - // that they should be sent ASAP, so we will send before any application data. - if len(w.ctrlFrames) > 0 { - n, err = w.writeCtrlFrames() - if err != nil { - return n, err - } - total += n - } - // Do the following only if there is something to send. - // We will end with checking for need to send close message. - if len(p) > 0 { - if w.compress { - buf := &bytes.Buffer{} - if w.compressor == nil { - w.compressor, _ = flate.NewWriter(buf, flate.BestSpeed) - } else { - w.compressor.Reset(buf) - } - if n, err = w.compressor.Write(p); err != nil { - return n, err - } - if err = w.compressor.Flush(); err != nil { - return n, err - } - b := buf.Bytes() - p = b[:len(b)-4] - } - fh, key := wsCreateFrameHeader(w.compress, wsBinaryMessage, len(p)) - wsMaskBuf(key, p) - n, err = w.w.Write(fh) - total += n - if err == nil { - n, err = w.w.Write(p) - total += n - } - } - if err == nil && w.cm != nil { - n, err = w.writeCloseMsg() - total += n - } - return total, err -} - -func (w *websocketWriter) writeCtrlFrames() (int, error) { - var ( - n int - total int - i int - err error - ) - for ; i < len(w.ctrlFrames); i++ { - buf := w.ctrlFrames[i] - n, err = w.w.Write(buf) - total += n - if err != nil { - break - } - } - if i != len(w.ctrlFrames) { - w.ctrlFrames = w.ctrlFrames[i+1:] - } else { - w.ctrlFrames = w.ctrlFrames[:0] - } - return total, err -} - -func (w *websocketWriter) writeCloseMsg() (int, error) { - n, err := w.w.Write(w.cm) - w.cm, w.noMoreSend = nil, true - return n, err -} - -func wsMaskBuf(key, buf []byte) { - for i := 0; i < len(buf); i++ { - buf[i] ^= key[i&3] - } -} - -// Create the frame header. -// Encodes the frame type and optional compression flag, and the size of the payload. 
-func wsCreateFrameHeader(compressed bool, frameType wsOpCode, l int) ([]byte, []byte) { - fh := make([]byte, wsMaxFrameHeaderSize) - n, key := wsFillFrameHeader(fh, compressed, frameType, l) - return fh[:n], key -} - -func wsFillFrameHeader(fh []byte, compressed bool, frameType wsOpCode, l int) (int, []byte) { - var n int - b := byte(frameType) - b |= wsFinalBit - if compressed { - b |= wsRsv1Bit - } - b1 := byte(wsMaskBit) - switch { - case l <= 125: - n = 2 - fh[0] = b - fh[1] = b1 | byte(l) - case l < 65536: - n = 4 - fh[0] = b - fh[1] = b1 | 126 - binary.BigEndian.PutUint16(fh[2:], uint16(l)) - default: - n = 10 - fh[0] = b - fh[1] = b1 | 127 - binary.BigEndian.PutUint64(fh[2:], uint64(l)) - } - var key []byte - var keyBuf [4]byte - if _, err := io.ReadFull(rand.Reader, keyBuf[:4]); err != nil { - kv := mrand.Int31() - binary.LittleEndian.PutUint32(keyBuf[:4], uint32(kv)) - } - copy(fh[n:], keyBuf[:4]) - key = fh[n : n+4] - n += 4 - return n, key -} - -func (nc *Conn) wsInitHandshake(u *url.URL) error { - compress := nc.Opts.Compression - tlsRequired := u.Scheme == wsSchemeTLS || nc.Opts.Secure || nc.Opts.TLSConfig != nil || nc.Opts.TLSCertCB != nil || nc.Opts.RootCAsCB != nil - // Do TLS here as needed. - if tlsRequired { - if err := nc.makeTLSConn(); err != nil { - return err - } - } else { - nc.bindToNewConn() - } - - var err error - - // For http request, we need the passed URL to contain either http or https scheme. 
- scheme := "http" - if tlsRequired { - scheme = "https" - } - ustr := fmt.Sprintf("%s://%s", scheme, u.Host) - - if nc.Opts.ProxyPath != "" { - proxyPath := nc.Opts.ProxyPath - if !strings.HasPrefix(proxyPath, "/") { - proxyPath = "/" + proxyPath - } - ustr += proxyPath - } - - u, err = url.Parse(ustr) - if err != nil { - return err - } - req := &http.Request{ - Method: "GET", - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - wsKey, err := wsMakeChallengeKey() - if err != nil { - return err - } - - req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{wsKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if compress { - req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) - } - if err := req.Write(nc.conn); err != nil { - return err - } - - var resp *http.Response - - br := bufio.NewReaderSize(nc.conn, 4096) - nc.conn.SetReadDeadline(time.Now().Add(nc.Opts.Timeout)) - resp, err = http.ReadResponse(br, req) - if err == nil && - (resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) { - - err = fmt.Errorf("invalid websocket connection") - } - // Check compression extension... - if err == nil && compress { - // Check that not only permessage-deflate extension is present, but that - // we also have server and client no context take over. - srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header) - - // If server does not support compression, then simply disable it in our side. 
- if !srvCompress { - compress = false - } else if !noCtxTakeover { - err = fmt.Errorf("compression negotiation error") - } - } - if resp != nil { - resp.Body.Close() - } - nc.conn.SetReadDeadline(time.Time{}) - if err != nil { - return err - } - - wsr := wsNewReader(nc.br.r) - wsr.nc = nc - // We have to slurp whatever is in the bufio reader and copy to br.r - if n := br.Buffered(); n != 0 { - wsr.ib, _ = br.Peek(n) - } - nc.br.r = wsr - nc.bw.w = &websocketWriter{w: nc.bw.w, compress: compress} - nc.ws = true - return nil -} - -func (nc *Conn) wsClose() { - nc.mu.Lock() - defer nc.mu.Unlock() - if !nc.ws { - return - } - nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_) -} - -func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) { - // In some low-level unit tests it will happen... - if nc == nil { - return - } - if needsLock { - nc.mu.Lock() - defer nc.mu.Unlock() - } - nc.wsEnqueueCloseMsgLocked(status, payload) -} - -func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { - wr, ok := nc.bw.w.(*websocketWriter) - if !ok || wr.cmDone { - return - } - statusAndPayloadLen := 2 + len(payload) - frame := make([]byte, 2+4+statusAndPayloadLen) - n, key := wsFillFrameHeader(frame, false, wsCloseMessage, statusAndPayloadLen) - // Set the status - binary.BigEndian.PutUint16(frame[n:], uint16(status)) - // If there is a payload, copy - if len(payload) > 0 { - copy(frame[n+2:], payload) - } - // Mask status + payload - wsMaskBuf(key, frame[n:n+statusAndPayloadLen]) - wr.cm = frame - wr.cmDone = true - nc.bw.flush() - if c := wr.compressor; c != nil { - c.Close() - } -} - -func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) { - // In some low-level unit tests it will happen... 
- if nc == nil { - return - } - if needsLock { - nc.mu.Lock() - defer nc.mu.Unlock() - } - wr, ok := nc.bw.w.(*websocketWriter) - if !ok { - return - } - fh, key := wsCreateFrameHeader(false, frameType, len(payload)) - wr.ctrlFrames = append(wr.ctrlFrames, fh) - if len(payload) > 0 { - wsMaskBuf(key, payload) - wr.ctrlFrames = append(wr.ctrlFrames, payload) - } - nc.bw.flush() -} - -func wsPMCExtensionSupport(header http.Header) (bool, bool) { - for _, extensionList := range header["Sec-Websocket-Extensions"] { - extensions := strings.Split(extensionList, ",") - for _, extension := range extensions { - extension = strings.Trim(extension, " \t") - params := strings.Split(extension, ";") - for i, p := range params { - p = strings.Trim(p, " \t") - if strings.EqualFold(p, wsPMCExtension) { - var snc bool - var cnc bool - for j := i + 1; j < len(params); j++ { - p = params[j] - p = strings.Trim(p, " \t") - if strings.EqualFold(p, wsPMCSrvNoCtx) { - snc = true - } else if strings.EqualFold(p, wsPMCCliNoCtx) { - cnc = true - } - if snc && cnc { - return true, true - } - } - return true, false - } - } - } - } - return false, false -} - -func wsMakeChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -func wsAcceptKey(key string) string { - h := sha1.New() - h.Write([]byte(key)) - h.Write(wsGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -// Returns true if the op code corresponds to a control frame. 
-func wsIsControlFrame(frameType wsOpCode) bool { - return frameType >= wsCloseMessage -} - -func isWebsocketScheme(u *url.URL) bool { - return u.Scheme == wsScheme || u.Scheme == wsSchemeTLS -} diff --git a/vendor/github.com/nats-io/nkeys/.gitignore b/vendor/github.com/nats-io/nkeys/.gitignore deleted file mode 100644 index d23676d2..00000000 --- a/vendor/github.com/nats-io/nkeys/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib -build/ - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ -.idea/ diff --git a/vendor/github.com/nats-io/nkeys/.goreleaser.yml b/vendor/github.com/nats-io/nkeys/.goreleaser.yml deleted file mode 100644 index e5c4f154..00000000 --- a/vendor/github.com/nats-io/nkeys/.goreleaser.yml +++ /dev/null @@ -1,63 +0,0 @@ -project_name: nkeys -release: - github: - owner: nats-io - name: nkeys - name_template: '{{.Tag}}' - draft: true -builds: - - id: nk - main: ./nk/main.go - ldflags: "-X main.Version={{.Tag}}_{{.Commit}}" - binary: nk - goos: - - darwin - - linux - - windows - - freebsd - goarch: - - amd64 - - arm - - arm64 - - 386 - - mips64le - - s390x - goarm: - - 6 - - 7 - ignore: - - goos: darwin - goarch: 386 - - goos: freebsd - goarch: arm - - goos: freebsd - goarch: arm64 - - goos: freebsd - goarch: 386 - -dist: build - -archives: - - name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm - }}v{{ .Arm }}{{ end }}' - wrap_in_directory: true - format: zip - files: - - README.md - - LICENSE - -checksum: - name_template: '{{ .ProjectName }}-v{{ .Version }}-checksums.txt' - -snapshot: - name_template: 'dev' - -nfpms: - - file_name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Arch }}{{ if .Arm - }}v{{ .Arm }}{{ end }}' - maintainer: nats.io - description: NKeys utility cli program - 
vendor: nats-io - bindir: /usr/local/bin - formats: - - deb \ No newline at end of file diff --git a/vendor/github.com/nats-io/nkeys/GOVERNANCE.md b/vendor/github.com/nats-io/nkeys/GOVERNANCE.md deleted file mode 100644 index 744d3bc2..00000000 --- a/vendor/github.com/nats-io/nkeys/GOVERNANCE.md +++ /dev/null @@ -1,3 +0,0 @@ -# NATS NKEYS Governance - -NATS NKEYS is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nkeys/LICENSE b/vendor/github.com/nats-io/nkeys/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/nats-io/nkeys/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/nats-io/nkeys/MAINTAINERS.md b/vendor/github.com/nats-io/nkeys/MAINTAINERS.md deleted file mode 100644 index 23214655..00000000 --- a/vendor/github.com/nats-io/nkeys/MAINTAINERS.md +++ /dev/null @@ -1,8 +0,0 @@ -# Maintainers - -Maintainership is on a per project basis. - -### Maintainers - - Derek Collison [@derekcollison](https://github.com/derekcollison) - - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) - - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/vendor/github.com/nats-io/nkeys/README.md b/vendor/github.com/nats-io/nkeys/README.md deleted file mode 100644 index 37febc9a..00000000 --- a/vendor/github.com/nats-io/nkeys/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# NKEYS - -[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) -[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/nkeys)](https://goreportcard.com/report/github.com/nats-io/nkeys) -[![Build Status](https://app.travis-ci.com/nats-io/nkeys.svg?branch=master)](https://app.travis-ci.com/nats-io/nkeys) -[![GoDoc](https://godoc.org/github.com/nats-io/nkeys?status.svg)](https://godoc.org/github.com/nats-io/nkeys) -[![Coverage Status](https://coveralls.io/repos/github/nats-io/nkeys/badge.svg?branch=master&service=github)](https://coveralls.io/github/nats-io/nkeys?branch=master) - -A 
public-key signature system based on [Ed25519](https://ed25519.cr.yp.to/) for the NATS ecosystem. - -## About - -The NATS ecosystem will be moving to [Ed25519](https://ed25519.cr.yp.to/) keys for identity, authentication and authorization for entities such as Accounts, Users, Servers and Clusters. - -Ed25519 is fast and resistant to side channel attacks. Generation of a seed key is all that is needed to be stored and kept safe, as the seed can generate both the public and private keys. - -The NATS system will utilize Ed25519 keys, meaning that NATS systems will never store or even have access to any private keys. Authentication will utilize a random challenge response mechanism. - -Dealing with 32 byte and 64 byte raw keys can be challenging. NKEYS is designed to formulate keys in a much friendlier fashion and references work done in cryptocurrencies, specifically [Stellar](https://www.stellar.org/). Bitcoin and others used a form of Base58 (or Base58Check) to encode raw keys. Stellar utilized a more traditional Base32 with a CRC16 and a version or prefix byte. NKEYS utilizes a similar format where the prefix will be 1 byte for public and private keys and will be 2 bytes for seeds. The base32 encoding of these prefixes will yield friendly human readable prefixes, e.g. '**N**' = server, '**C**' = cluster, '**O**' = operator, '**A**' = account, and '**U**' = user. '**P**' is used for private keys. For seeds, the first encoded prefix is '**S**', and the second character will be the type for the public key, e.g. "**SU**" is a seed for a user key pair, "**SA**" is a seed for an account key pair. - -## Installation - -Use the `go` command: - - $ go get github.com/nats-io/nkeys - -## nk - Command Line Utility - -Located under the nk [directory](https://github.com/nats-io/nkeys/tree/master/nk). - -## Basic API Usage -```go - -// Create a new User KeyPair -user, _ := nkeys.CreateUser() - -// Sign some data with a full key pair user. 
-data := []byte("Hello World") -sig, _ := user.Sign(data) - -// Verify the signature. -err = user.Verify(data, sig) - -// Access the seed, the only thing that needs to be stored and kept safe. -// seed = "SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY" -seed, _ := user.Seed() - -// Access the public key which can be shared. -// publicKey = "UD466L6EBCM3YY5HEGHJANNTN4LSKTSUXTH7RILHCKEQMQHTBNLHJJXT" -publicKey, _ := user.PublicKey() - -// Create a full User who can sign and verify from a private seed. -user, _ = nkeys.FromSeed(seed) - -// Create a User who can only verify signatures via a public key. -user, _ = nkeys.FromPublicKey(publicKey) - -// Create a User KeyPair with our own random data. -var rawSeed [32]byte -_, err := io.ReadFull(rand.Reader, rawSeed[:]) // Or some other random source. -user2, _ := nkeys.FromRawSeed(PrefixByteUser, rawSeed) - -``` - -## License - -Unless otherwise noted, the NATS source files are distributed -under the Apache Version 2.0 license found in the LICENSE file. - diff --git a/vendor/github.com/nats-io/nkeys/TODO.md b/vendor/github.com/nats-io/nkeys/TODO.md deleted file mode 100644 index 2649c9e5..00000000 --- a/vendor/github.com/nats-io/nkeys/TODO.md +++ /dev/null @@ -1,5 +0,0 @@ - -# General - -- [ ] Child key derivation -- [ ] Hardware support, e.g. YubiHSM diff --git a/vendor/github.com/nats-io/nkeys/crc16.go b/vendor/github.com/nats-io/nkeys/crc16.go deleted file mode 100644 index fbe38fbc..00000000 --- a/vendor/github.com/nats-io/nkeys/crc16.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -// An implementation of crc16 according to CCITT standards for XMODEM. - -var crc16tab = [256]uint16{ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 
- 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -} - -// crc16 returns the 2-byte crc for the data provided. -func crc16(data []byte) uint16 { - var crc uint16 - for _, b := range data { - crc = ((crc << 8) & 0xffff) ^ crc16tab[((crc>>8)^uint16(b))&0x00FF] - } - return crc -} - -// validate will check the calculated crc16 checksum for data against the expected. -func validate(data []byte, expected uint16) error { - if crc16(data) != expected { - return ErrInvalidChecksum - } - return nil -} diff --git a/vendor/github.com/nats-io/nkeys/creds_utils.go b/vendor/github.com/nats-io/nkeys/creds_utils.go deleted file mode 100644 index ecd94631..00000000 --- a/vendor/github.com/nats-io/nkeys/creds_utils.go +++ /dev/null @@ -1,78 +0,0 @@ -package nkeys - -import ( - "bytes" - "regexp" - "strings" -) - -var userConfigRE = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}\r?\n))`) - -// ParseDecoratedJWT takes a creds file and returns the JWT portion. -func ParseDecoratedJWT(contents []byte) (string, error) { - items := userConfigRE.FindAllSubmatch(contents, -1) - if len(items) == 0 { - return string(contents), nil - } - // First result should be the user JWT. - // We copy here so that if the file contained a seed file too we wipe appropriately. 
- raw := items[0][1] - tmp := make([]byte, len(raw)) - copy(tmp, raw) - return strings.TrimSpace(string(tmp)), nil -} - -// ParseDecoratedNKey takes a creds file, finds the NKey portion and creates a -// key pair from it. -func ParseDecoratedNKey(contents []byte) (KeyPair, error) { - var seed []byte - - items := userConfigRE.FindAllSubmatch(contents, -1) - if len(items) > 1 { - seed = items[1][1] - } else { - lines := bytes.Split(contents, []byte("\n")) - for _, line := range lines { - if bytes.HasPrefix(bytes.TrimSpace(line), []byte("SO")) || - bytes.HasPrefix(bytes.TrimSpace(line), []byte("SA")) || - bytes.HasPrefix(bytes.TrimSpace(line), []byte("SU")) { - seed = line - break - } - } - } - if seed == nil { - return nil, ErrNoSeedFound - } - if !bytes.HasPrefix(seed, []byte("SO")) && - !bytes.HasPrefix(seed, []byte("SA")) && - !bytes.HasPrefix(seed, []byte("SU")) { - return nil, ErrInvalidNkeySeed - } - kp, err := FromSeed(seed) - if err != nil { - return nil, err - } - return kp, nil -} - -// ParseDecoratedUserNKey takes a creds file, finds the NKey portion and creates a -// key pair from it. Similar to ParseDecoratedNKey but fails for non-user keys. -func ParseDecoratedUserNKey(contents []byte) (KeyPair, error) { - nk, err := ParseDecoratedNKey(contents) - if err != nil { - return nil, err - } - seed, err := nk.Seed() - if err != nil { - return nil, err - } - if !bytes.HasPrefix(seed, []byte("SU")) { - return nil, ErrInvalidUserSeed - } - kp, err := FromSeed(seed) - if err != nil { - return nil, err - } - return kp, nil -} diff --git a/vendor/github.com/nats-io/nkeys/dependencies.md b/vendor/github.com/nats-io/nkeys/dependencies.md deleted file mode 100644 index 370184aa..00000000 --- a/vendor/github.com/nats-io/nkeys/dependencies.md +++ /dev/null @@ -1,12 +0,0 @@ -# External Dependencies - -This file lists the dependencies used in this repository. 
- -| Dependency | License | -|-|-| -| Go | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/crypto v0.3.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/net v0.2.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/sys v0.2.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/term v0.2.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/text v0.4.0 | BSD 3-Clause "New" or "Revised" License | diff --git a/vendor/github.com/nats-io/nkeys/errors.go b/vendor/github.com/nats-io/nkeys/errors.go deleted file mode 100644 index a30bb96e..00000000 --- a/vendor/github.com/nats-io/nkeys/errors.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nkeys - -// Errors -const ( - ErrInvalidPrefixByte = nkeysError("nkeys: invalid prefix byte") - ErrInvalidKey = nkeysError("nkeys: invalid key") - ErrInvalidPublicKey = nkeysError("nkeys: invalid public key") - ErrInvalidPrivateKey = nkeysError("nkeys: invalid private key") - ErrInvalidSeedLen = nkeysError("nkeys: invalid seed length") - ErrInvalidSeed = nkeysError("nkeys: invalid seed") - ErrInvalidEncoding = nkeysError("nkeys: invalid encoded key") - ErrInvalidSignature = nkeysError("nkeys: signature verification failed") - ErrCannotSign = nkeysError("nkeys: can not sign, no private key available") - ErrPublicKeyOnly = nkeysError("nkeys: no seed or private key available") - ErrIncompatibleKey = nkeysError("nkeys: incompatible key") - ErrInvalidChecksum = nkeysError("nkeys: invalid checksum") - ErrNoSeedFound = nkeysError("nkeys: no nkey seed found") - ErrInvalidNkeySeed = nkeysError("nkeys: doesn't contain a seed nkey") - ErrInvalidUserSeed = nkeysError("nkeys: doesn't contain an user seed nkey") - ErrInvalidRecipient = nkeysError("nkeys: not a valid recipient public curve key") - ErrInvalidSender = nkeysError("nkeys: not a valid sender public curve key") - ErrInvalidCurveKey = nkeysError("nkeys: not a valid curve key") - ErrInvalidCurveSeed = nkeysError("nkeys: not a valid curve seed") - ErrInvalidEncrypted = nkeysError("nkeys: encrypted input is not valid") - ErrInvalidEncVersion = nkeysError("nkeys: encrypted input wrong version") - ErrCouldNotDecrypt = nkeysError("nkeys: could not decrypt input") - ErrInvalidCurveKeyOperation = nkeysError("nkeys: curve key is not valid for sign/verify") - ErrInvalidNKeyOperation = nkeysError("nkeys: only curve key can seal/open") - ErrCannotOpen = nkeysError("nkeys: cannot open no private curve key available") - ErrCannotSeal = nkeysError("nkeys: cannot seal no private curve key available") -) - -type nkeysError string - -func (e nkeysError) Error() string { - return string(e) -} diff --git 
a/vendor/github.com/nats-io/nkeys/keypair.go b/vendor/github.com/nats-io/nkeys/keypair.go deleted file mode 100644 index 9d055180..00000000 --- a/vendor/github.com/nats-io/nkeys/keypair.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2018-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "bytes" - "crypto/rand" - "io" - - "golang.org/x/crypto/ed25519" -) - -// kp is the internal struct for a kepypair using seed. -type kp struct { - seed []byte -} - -// All seeds are 32 bytes long. -const seedLen = 32 - -// CreatePair will create a KeyPair based on the rand entropy and a type/prefix byte. -func CreatePair(prefix PrefixByte) (KeyPair, error) { - return CreatePairWithRand(prefix, rand.Reader) -} - -// CreatePair will create a KeyPair based on the rand reader and a type/prefix byte. rand can be nil. -func CreatePairWithRand(prefix PrefixByte, rr io.Reader) (KeyPair, error) { - if prefix == PrefixByteCurve { - return CreateCurveKeysWithRand(rr) - } - if rr == nil { - rr = rand.Reader - } - var rawSeed [seedLen]byte - - _, err := io.ReadFull(rr, rawSeed[:]) - if err != nil { - return nil, err - } - - seed, err := EncodeSeed(prefix, rawSeed[:]) - if err != nil { - return nil, err - } - return &kp{seed}, nil -} - -// rawSeed will return the raw, decoded 64 byte seed. 
-func (pair *kp) rawSeed() ([]byte, error) { - _, raw, err := DecodeSeed(pair.seed) - return raw, err -} - -// keys will return a 32 byte public key and a 64 byte private key utilizing the seed. -func (pair *kp) keys() (ed25519.PublicKey, ed25519.PrivateKey, error) { - raw, err := pair.rawSeed() - if err != nil { - return nil, nil, err - } - return ed25519.GenerateKey(bytes.NewReader(raw)) -} - -// Wipe will randomize the contents of the seed key -func (pair *kp) Wipe() { - io.ReadFull(rand.Reader, pair.seed) - pair.seed = nil -} - -// Seed will return the encoded seed. -func (pair *kp) Seed() ([]byte, error) { - return pair.seed, nil -} - -// PublicKey will return the encoded public key associated with the KeyPair. -// All KeyPairs have a public key. -func (pair *kp) PublicKey() (string, error) { - public, raw, err := DecodeSeed(pair.seed) - if err != nil { - return "", err - } - pub, _, err := ed25519.GenerateKey(bytes.NewReader(raw)) - if err != nil { - return "", err - } - pk, err := Encode(public, pub) - if err != nil { - return "", err - } - return string(pk), nil -} - -// PrivateKey will return the encoded private key for KeyPair. -func (pair *kp) PrivateKey() ([]byte, error) { - _, priv, err := pair.keys() - if err != nil { - return nil, err - } - return Encode(PrefixBytePrivate, priv) -} - -// Sign will sign the input with KeyPair's private key. -func (pair *kp) Sign(input []byte) ([]byte, error) { - _, priv, err := pair.keys() - if err != nil { - return nil, err - } - return ed25519.Sign(priv, input), nil -} - -// Verify will verify the input against a signature utilizing the public key. 
-func (pair *kp) Verify(input []byte, sig []byte) error { - pub, _, err := pair.keys() - if err != nil { - return err - } - if !ed25519.Verify(pub, input, sig) { - return ErrInvalidSignature - } - return nil -} - -// Seal is only supported on CurveKeyPair -func (pair *kp) Seal(input []byte, recipient string) ([]byte, error) { - return nil, ErrInvalidNKeyOperation -} - -// SealWithRand is only supported on CurveKeyPair -func (pair *kp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) { - return nil, ErrInvalidNKeyOperation -} - -// Open is only supported on CurveKey -func (pair *kp) Open(input []byte, sender string) ([]byte, error) { - return nil, ErrInvalidNKeyOperation -} diff --git a/vendor/github.com/nats-io/nkeys/nkeys.go b/vendor/github.com/nats-io/nkeys/nkeys.go deleted file mode 100644 index b83a99d4..00000000 --- a/vendor/github.com/nats-io/nkeys/nkeys.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2018-2019 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nkeys is an Ed25519 based public-key signature system that simplifies keys and seeds -// and performs signing and verification. -// It also supports encryption via x25519 keys and is compatible with https://pkg.go.dev/golang.org/x/crypto/nacl/box. -package nkeys - -import "io" - -// Version is our current version -const Version = "0.4.5" - -// KeyPair provides the central interface to nkeys. 
-type KeyPair interface { - Seed() ([]byte, error) - PublicKey() (string, error) - PrivateKey() ([]byte, error) - // Sign is only supported on Non CurveKeyPairs - Sign(input []byte) ([]byte, error) - // Verify is only supported on Non CurveKeyPairs - Verify(input []byte, sig []byte) error - Wipe() - // Seal is only supported on CurveKeyPair - Seal(input []byte, recipient string) ([]byte, error) - // SealWithRand is only supported on CurveKeyPair - SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) - // Open is only supported on CurveKey - Open(input []byte, sender string) ([]byte, error) -} - -// CreateUser will create a User typed KeyPair. -func CreateUser() (KeyPair, error) { - return CreatePair(PrefixByteUser) -} - -// CreateAccount will create an Account typed KeyPair. -func CreateAccount() (KeyPair, error) { - return CreatePair(PrefixByteAccount) -} - -// CreateServer will create a Server typed KeyPair. -func CreateServer() (KeyPair, error) { - return CreatePair(PrefixByteServer) -} - -// CreateCluster will create a Cluster typed KeyPair. -func CreateCluster() (KeyPair, error) { - return CreatePair(PrefixByteCluster) -} - -// CreateOperator will create an Operator typed KeyPair. -func CreateOperator() (KeyPair, error) { - return CreatePair(PrefixByteOperator) -} - -// FromPublicKey will create a KeyPair capable of verifying signatures. -func FromPublicKey(public string) (KeyPair, error) { - raw, err := decode([]byte(public)) - if err != nil { - return nil, err - } - pre := PrefixByte(raw[0]) - if err := checkValidPublicPrefixByte(pre); err != nil { - return nil, ErrInvalidPublicKey - } - return &pub{pre, raw[1:]}, nil -} - -// FromSeed will create a KeyPair capable of signing and verifying signatures. -func FromSeed(seed []byte) (KeyPair, error) { - prefix, _, err := DecodeSeed(seed) - if err != nil { - return nil, err - } - if prefix == PrefixByteCurve { - return FromCurveSeed(seed) - } - copy := append([]byte{}, seed...) 
- return &kp{copy}, nil -} - -// FromRawSeed will create a KeyPair from the raw 32 byte seed for a given type. -func FromRawSeed(prefix PrefixByte, rawSeed []byte) (KeyPair, error) { - seed, err := EncodeSeed(prefix, rawSeed) - if err != nil { - return nil, err - } - return &kp{seed}, nil -} diff --git a/vendor/github.com/nats-io/nkeys/public.go b/vendor/github.com/nats-io/nkeys/public.go deleted file mode 100644 index c3cd21ed..00000000 --- a/vendor/github.com/nats-io/nkeys/public.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "crypto/rand" - "io" - - "golang.org/x/crypto/ed25519" -) - -// A KeyPair from a public key capable of verifying only. -type pub struct { - pre PrefixByte - pub ed25519.PublicKey -} - -// PublicKey will return the encoded public key associated with the KeyPair. -// All KeyPairs have a public key. -func (p *pub) PublicKey() (string, error) { - pk, err := Encode(p.pre, p.pub) - if err != nil { - return "", err - } - return string(pk), nil -} - -// Seed will return an error since this is not available for public key only KeyPairs. -func (p *pub) Seed() ([]byte, error) { - return nil, ErrPublicKeyOnly -} - -// PrivateKey will return an error since this is not available for public key only KeyPairs. 
-func (p *pub) PrivateKey() ([]byte, error) { - return nil, ErrPublicKeyOnly -} - -// Sign will return an error since this is not available for public key only KeyPairs. -func (p *pub) Sign(input []byte) ([]byte, error) { - return nil, ErrCannotSign -} - -// Verify will verify the input against a signature utilizing the public key. -func (p *pub) Verify(input []byte, sig []byte) error { - if !ed25519.Verify(p.pub, input, sig) { - return ErrInvalidSignature - } - return nil -} - -// Wipe will randomize the public key and erase the pre byte. -func (p *pub) Wipe() { - p.pre = '0' - io.ReadFull(rand.Reader, p.pub) -} - -func (p *pub) Seal(input []byte, recipient string) ([]byte, error) { - if p.pre == PrefixByteCurve { - return nil, ErrCannotSeal - } - return nil, ErrInvalidNKeyOperation -} -func (p *pub) SealWithRand(input []byte, _recipient string, rr io.Reader) ([]byte, error) { - if p.pre == PrefixByteCurve { - return nil, ErrCannotSeal - } - return nil, ErrInvalidNKeyOperation -} - -func (p *pub) Open(input []byte, sender string) ([]byte, error) { - if p.pre == PrefixByteCurve { - return nil, ErrCannotOpen - } - return nil, ErrInvalidNKeyOperation -} diff --git a/vendor/github.com/nats-io/nkeys/strkey.go b/vendor/github.com/nats-io/nkeys/strkey.go deleted file mode 100644 index 8ae33116..00000000 --- a/vendor/github.com/nats-io/nkeys/strkey.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2018-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "bytes" - "encoding/base32" - "encoding/binary" -) - -// PrefixByte is a lead byte representing the type. -type PrefixByte byte - -const ( - // PrefixByteSeed is the version byte used for encoded NATS Seeds - PrefixByteSeed PrefixByte = 18 << 3 // Base32-encodes to 'S...' - - // PrefixBytePrivate is the version byte used for encoded NATS Private keys - PrefixBytePrivate PrefixByte = 15 << 3 // Base32-encodes to 'P...' - - // PrefixByteServer is the version byte used for encoded NATS Servers - PrefixByteServer PrefixByte = 13 << 3 // Base32-encodes to 'N...' - - // PrefixByteCluster is the version byte used for encoded NATS Clusters - PrefixByteCluster PrefixByte = 2 << 3 // Base32-encodes to 'C...' - - // PrefixByteOperator is the version byte used for encoded NATS Operators - PrefixByteOperator PrefixByte = 14 << 3 // Base32-encodes to 'O...' - - // PrefixByteAccount is the version byte used for encoded NATS Accounts - PrefixByteAccount PrefixByte = 0 // Base32-encodes to 'A...' - - // PrefixByteUser is the version byte used for encoded NATS Users - PrefixByteUser PrefixByte = 20 << 3 // Base32-encodes to 'U...' - - // PrefixByteCurve is the version byte used for encoded CurveKeys (X25519) - PrefixByteCurve PrefixByte = 23 << 3 // Base32-encodes to 'X...' - - // PrefixByteUnknown is for unknown prefixes. - PrefixByteUnknown PrefixByte = 25 << 3 // Base32-encodes to 'Z...' -) - -// Set our encoding to not include padding '==' -var b32Enc = base32.StdEncoding.WithPadding(base32.NoPadding) - -// Encode will encode a raw key or seed with the prefix and crc16 and then base32 encoded. 
-func Encode(prefix PrefixByte, src []byte) ([]byte, error) { - if err := checkValidPrefixByte(prefix); err != nil { - return nil, err - } - - var raw bytes.Buffer - - // write prefix byte - if err := raw.WriteByte(byte(prefix)); err != nil { - return nil, err - } - - // write payload - if _, err := raw.Write(src); err != nil { - return nil, err - } - - // Calculate and write crc16 checksum - err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) - if err != nil { - return nil, err - } - - data := raw.Bytes() - buf := make([]byte, b32Enc.EncodedLen(len(data))) - b32Enc.Encode(buf, data) - return buf[:], nil -} - -// EncodeSeed will encode a raw key with the prefix and then seed prefix and crc16 and then base32 encoded. -// `src` must be 32 bytes long (ed25519.SeedSize). -func EncodeSeed(public PrefixByte, src []byte) ([]byte, error) { - if err := checkValidPublicPrefixByte(public); err != nil { - return nil, err - } - - if len(src) != seedLen { - return nil, ErrInvalidSeedLen - } - - // In order to make this human printable for both bytes, we need to do a little - // bit manipulation to setup for base32 encoding which takes 5 bits at a time. - b1 := byte(PrefixByteSeed) | (byte(public) >> 5) - b2 := (byte(public) & 31) << 3 // 31 = 00011111 - - var raw bytes.Buffer - - raw.WriteByte(b1) - raw.WriteByte(b2) - - // write payload - if _, err := raw.Write(src); err != nil { - return nil, err - } - - // Calculate and write crc16 checksum - err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) - if err != nil { - return nil, err - } - - data := raw.Bytes() - buf := make([]byte, b32Enc.EncodedLen(len(data))) - b32Enc.Encode(buf, data) - return buf, nil -} - -// IsValidEncoding will tell you if the encoding is a valid key. -func IsValidEncoding(src []byte) bool { - _, err := decode(src) - return err == nil -} - -// decode will decode the base32 and check crc16 and the prefix for validity. 
-func decode(src []byte) ([]byte, error) { - raw := make([]byte, b32Enc.DecodedLen(len(src))) - n, err := b32Enc.Decode(raw, src) - if err != nil { - return nil, err - } - raw = raw[:n] - - if n < 4 { - return nil, ErrInvalidEncoding - } - - crc := binary.LittleEndian.Uint16(raw[n-2:]) - - // ensure checksum is valid - if err := validate(raw[0:n-2], crc); err != nil { - return nil, err - } - - return raw[:n-2], nil -} - -// Decode will decode the base32 string and check crc16 and enforce the prefix is what is expected. -func Decode(expectedPrefix PrefixByte, src []byte) ([]byte, error) { - if err := checkValidPrefixByte(expectedPrefix); err != nil { - return nil, err - } - raw, err := decode(src) - if err != nil { - return nil, err - } - b1 := raw[0] & 248 // 248 = 11111000 - if prefix := PrefixByte(b1); prefix != expectedPrefix { - return nil, ErrInvalidPrefixByte - } - return raw[1:], nil -} - -// DecodeSeed will decode the base32 string and check crc16 and enforce the prefix is a seed -// and the subsequent type is a valid type. -func DecodeSeed(src []byte) (PrefixByte, []byte, error) { - raw, err := decode(src) - if err != nil { - return PrefixByteSeed, nil, err - } - // Need to do the reverse here to get back to internal representation. - b1 := raw[0] & 248 // 248 = 11111000 - b2 := (raw[0]&7)<<5 | ((raw[1] & 248) >> 3) // 7 = 00000111 - - if PrefixByte(b1) != PrefixByteSeed { - return PrefixByteSeed, nil, ErrInvalidSeed - } - if checkValidPublicPrefixByte(PrefixByte(b2)) != nil { - return PrefixByteSeed, nil, ErrInvalidSeed - } - return PrefixByte(b2), raw[2:], nil -} - -// Prefix returns PrefixBytes of its input -func Prefix(src string) PrefixByte { - b, err := decode([]byte(src)) - if err != nil { - return PrefixByteUnknown - } - prefix := PrefixByte(b[0]) - err = checkValidPrefixByte(prefix) - if err == nil { - return prefix - } - // Might be a seed. 
- b1 := b[0] & 248 - if PrefixByte(b1) == PrefixByteSeed { - return PrefixByteSeed - } - return PrefixByteUnknown -} - -// IsValidPublicKey will decode and verify that the string is a valid encoded public key. -func IsValidPublicKey(src string) bool { - b, err := decode([]byte(src)) - if err != nil { - return false - } - if prefix := PrefixByte(b[0]); checkValidPublicPrefixByte(prefix) != nil { - return false - } - return true -} - -// IsValidPublicUserKey will decode and verify the string is a valid encoded Public User Key. -func IsValidPublicUserKey(src string) bool { - _, err := Decode(PrefixByteUser, []byte(src)) - return err == nil -} - -// IsValidPublicAccountKey will decode and verify the string is a valid encoded Public Account Key. -func IsValidPublicAccountKey(src string) bool { - _, err := Decode(PrefixByteAccount, []byte(src)) - return err == nil -} - -// IsValidPublicServerKey will decode and verify the string is a valid encoded Public Server Key. -func IsValidPublicServerKey(src string) bool { - _, err := Decode(PrefixByteServer, []byte(src)) - return err == nil -} - -// IsValidPublicClusterKey will decode and verify the string is a valid encoded Public Cluster Key. -func IsValidPublicClusterKey(src string) bool { - _, err := Decode(PrefixByteCluster, []byte(src)) - return err == nil -} - -// IsValidPublicOperatorKey will decode and verify the string is a valid encoded Public Operator Key. -func IsValidPublicOperatorKey(src string) bool { - _, err := Decode(PrefixByteOperator, []byte(src)) - return err == nil -} - -// IsValidPublicCurveKey will decode and verify the string is a valid encoded Public Curve Key. -func IsValidPublicCurveKey(src string) bool { - _, err := Decode(PrefixByteCurve, []byte(src)) - return err == nil -} - -// checkValidPrefixByte returns an error if the provided value -// is not one of the defined valid prefix byte constants. 
-func checkValidPrefixByte(prefix PrefixByte) error { - switch prefix { - case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, - PrefixByteAccount, PrefixByteUser, PrefixByteSeed, PrefixBytePrivate, PrefixByteCurve: - return nil - } - return ErrInvalidPrefixByte -} - -// checkValidPublicPrefixByte returns an error if the provided value -// is not one of the public defined valid prefix byte constants. -func checkValidPublicPrefixByte(prefix PrefixByte) error { - switch prefix { - case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, PrefixByteAccount, PrefixByteUser, PrefixByteCurve: - return nil - } - return ErrInvalidPrefixByte -} - -func (p PrefixByte) String() string { - switch p { - case PrefixByteOperator: - return "operator" - case PrefixByteServer: - return "server" - case PrefixByteCluster: - return "cluster" - case PrefixByteAccount: - return "account" - case PrefixByteUser: - return "user" - case PrefixByteSeed: - return "seed" - case PrefixBytePrivate: - return "private" - case PrefixByteCurve: - return "x25519" - } - return "unknown" -} - -// CompatibleKeyPair returns an error if the KeyPair doesn't match expected PrefixByte(s) -func CompatibleKeyPair(kp KeyPair, expected ...PrefixByte) error { - pk, err := kp.PublicKey() - if err != nil { - return err - } - pkType := Prefix(pk) - for _, k := range expected { - if pkType == k { - return nil - } - } - - return ErrIncompatibleKey -} diff --git a/vendor/github.com/nats-io/nkeys/xkeys.go b/vendor/github.com/nats-io/nkeys/xkeys.go deleted file mode 100644 index 54e50b9d..00000000 --- a/vendor/github.com/nats-io/nkeys/xkeys.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "io" - - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/nacl/box" -) - -// This package will support safe use of X25519 keys for asymmetric encryption. -// We will be compatible with nacl.Box, but generate random nonces automatically. -// We may add more advanced options in the future for group recipients and better -// end to end algorithms. - -const ( - curveKeyLen = 32 - curveDecodeLen = 35 - curveNonceLen = 24 -) - -type ckp struct { - seed [curveKeyLen]byte // Private raw key. -} - -// CreateUser will create a User typed KeyPair. -func CreateCurveKeys() (KeyPair, error) { - return CreateCurveKeysWithRand(rand.Reader) -} - -// CreateUser will create a User typed KeyPair with specified rand source. -func CreateCurveKeysWithRand(rr io.Reader) (KeyPair, error) { - var kp ckp - _, err := io.ReadFull(rr, kp.seed[:]) - if err != nil { - return nil, err - } - return &kp, nil -} - -// Will create a curve key pair from seed. -func FromCurveSeed(seed []byte) (KeyPair, error) { - pb, raw, err := DecodeSeed(seed) - if err != nil { - return nil, err - } - if pb != PrefixByteCurve || len(raw) != curveKeyLen { - return nil, ErrInvalidCurveSeed - } - var kp ckp - copy(kp.seed[:], raw) - return &kp, nil -} - -// Seed will return the encoded seed. -func (pair *ckp) Seed() ([]byte, error) { - return EncodeSeed(PrefixByteCurve, pair.seed[:]) -} - -// PublicKey will return the encoded public key. 
-func (pair *ckp) PublicKey() (string, error) { - var pub [curveKeyLen]byte - curve25519.ScalarBaseMult(&pub, &pair.seed) - key, err := Encode(PrefixByteCurve, pub[:]) - return string(key), err -} - -// PrivateKey will return the encoded private key. -func (pair *ckp) PrivateKey() ([]byte, error) { - return Encode(PrefixBytePrivate, pair.seed[:]) -} - -func decodePubCurveKey(src string, dest [curveKeyLen]byte) error { - var raw [curveDecodeLen]byte // should always be 35 - n, err := b32Enc.Decode(raw[:], []byte(src)) - if err != nil { - return err - } - if n != curveDecodeLen { - return ErrInvalidCurveKey - } - // Make sure it is what we expected. - if prefix := PrefixByte(raw[0]); prefix != PrefixByteCurve { - return ErrInvalidPublicKey - } - var crc uint16 - end := n - 2 - sum := raw[end:n] - checksum := bytes.NewReader(sum) - if err := binary.Read(checksum, binary.LittleEndian, &crc); err != nil { - return err - } - - // ensure checksum is valid - if err := validate(raw[:end], crc); err != nil { - return err - } - - // Copy over, ignore prefix byte. - copy(dest[:], raw[1:end]) - return nil -} - -// Only version for now, but could add in X3DH in the future, etc. -const XKeyVersionV1 = "xkv1" -const vlen = len(XKeyVersionV1) - -// Seal is compatible with nacl.Box.Seal() and can be used in similar situations for small messages. -// We generate the nonce from crypto rand by default. 
-func (pair *ckp) Seal(input []byte, recipient string) ([]byte, error) { - return pair.SealWithRand(input, recipient, rand.Reader) -} - -func (pair *ckp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) { - var ( - rpub [curveKeyLen]byte - nonce [curveNonceLen]byte - out [vlen + curveNonceLen]byte - err error - ) - - if err = decodePubCurveKey(recipient, rpub); err != nil { - return nil, ErrInvalidRecipient - } - if _, err := io.ReadFull(rr, nonce[:]); err != nil { - return nil, err - } - copy(out[:vlen], []byte(XKeyVersionV1)) - copy(out[vlen:], nonce[:]) - return box.Seal(out[:], input, &nonce, &rpub, &pair.seed), nil -} - -func (pair *ckp) Open(input []byte, sender string) ([]byte, error) { - if len(input) <= vlen+curveNonceLen { - return nil, ErrInvalidEncrypted - } - var ( - spub [curveKeyLen]byte - nonce [curveNonceLen]byte - err error - ) - if !bytes.Equal(input[:vlen], []byte(XKeyVersionV1)) { - return nil, ErrInvalidEncVersion - } - copy(nonce[:], input[vlen:vlen+curveNonceLen]) - - if err = decodePubCurveKey(sender, spub); err != nil { - return nil, ErrInvalidSender - } - - decrypted, ok := box.Open(nil, input[vlen+curveNonceLen:], &nonce, &spub, &pair.seed) - if !ok { - return nil, ErrCouldNotDecrypt - } - return decrypted, nil -} - -// Wipe will randomize the contents of the secret key -func (pair *ckp) Wipe() { - io.ReadFull(rand.Reader, pair.seed[:]) -} - -func (pair *ckp) Sign(_ []byte) ([]byte, error) { - return nil, ErrInvalidCurveKeyOperation -} - -func (pair *ckp) Verify(_ []byte, _ []byte) error { - return ErrInvalidCurveKeyOperation -} diff --git a/vendor/github.com/nats-io/nuid/.gitignore b/vendor/github.com/nats-io/nuid/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/nats-io/nuid/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes 
-*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/nats-io/nuid/.travis.yml b/vendor/github.com/nats-io/nuid/.travis.yml deleted file mode 100644 index 52be7265..00000000 --- a/vendor/github.com/nats-io/nuid/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go -sudo: false -go: -- 1.9.x -- 1.10.x - -install: -- go get -t ./... -- go get github.com/mattn/goveralls - -script: -- go fmt ./... -- go vet ./... -- go test -v -- go test -v --race -- go test -v -covermode=count -coverprofile=coverage.out -- $HOME/gopath/bin/goveralls -coverprofile coverage.out -service travis-ci diff --git a/vendor/github.com/nats-io/nuid/GOVERNANCE.md b/vendor/github.com/nats-io/nuid/GOVERNANCE.md deleted file mode 100644 index 01aee70d..00000000 --- a/vendor/github.com/nats-io/nuid/GOVERNANCE.md +++ /dev/null @@ -1,3 +0,0 @@ -# NATS NUID Governance - -NATS NUID is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nuid/LICENSE b/vendor/github.com/nats-io/nuid/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/nats-io/nuid/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/nats-io/nuid/MAINTAINERS.md b/vendor/github.com/nats-io/nuid/MAINTAINERS.md deleted file mode 100644 index 6d0ed3e3..00000000 --- a/vendor/github.com/nats-io/nuid/MAINTAINERS.md +++ /dev/null @@ -1,6 +0,0 @@ -# Maintainers - -Maintainership is on a per project basis. - -### Core-maintainers - - Derek Collison [@derekcollison](https://github.com/derekcollison) \ No newline at end of file diff --git a/vendor/github.com/nats-io/nuid/README.md b/vendor/github.com/nats-io/nuid/README.md deleted file mode 100644 index 16e53948..00000000 --- a/vendor/github.com/nats-io/nuid/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# NUID - -[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) -[![ReportCard](http://goreportcard.com/badge/nats-io/nuid)](http://goreportcard.com/report/nats-io/nuid) -[![Build Status](https://travis-ci.org/nats-io/nuid.svg?branch=master)](http://travis-ci.org/nats-io/nuid) -[![Release](https://img.shields.io/badge/release-v1.0.1-1eb0fc.svg)](https://github.com/nats-io/nuid/releases/tag/v1.0.1) -[![GoDoc](http://godoc.org/github.com/nats-io/nuid?status.png)](http://godoc.org/github.com/nats-io/nuid) -[![Coverage Status](https://coveralls.io/repos/github/nats-io/nuid/badge.svg?branch=master)](https://coveralls.io/github/nats-io/nuid?branch=master) - -A highly performant unique 
identifier generator. - -## Installation - -Use the `go` command: - - $ go get github.com/nats-io/nuid - -## Basic Usage -```go - -// Utilize the global locked instance -nuid := nuid.Next() - -// Create an instance, these are not locked. -n := nuid.New() -nuid = n.Next() - -// Generate a new crypto/rand seeded prefix. -// Generally not needed, happens automatically. -n.RandomizePrefix() -``` - -## Performance -NUID needs to be very fast to generate and be truly unique, all while being entropy pool friendly. -NUID uses 12 bytes of crypto generated data (entropy draining), and 10 bytes of pseudo-random -sequential data that increments with a pseudo-random increment. - -Total length of a NUID string is 22 bytes of base 62 ascii text, so 62^22 or -2707803647802660400290261537185326956544 possibilities. - -NUID can generate identifiers as fast as 60ns, or ~16 million per second. There is an associated -benchmark you can use to test performance on your own hardware. - -## License - -Unless otherwise noted, the NATS source files are distributed -under the Apache Version 2.0 license found in the LICENSE file. diff --git a/vendor/github.com/nats-io/nuid/nuid.go b/vendor/github.com/nats-io/nuid/nuid.go deleted file mode 100644 index 8134c764..00000000 --- a/vendor/github.com/nats-io/nuid/nuid.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016-2019 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly. -package nuid - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "sync" - "time" - - prand "math/rand" -) - -// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly. -// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data -// that is started at a pseudo random number and increments with a pseudo-random increment. -// Total is 22 bytes of base 62 ascii text :) - -// Version of the library -const Version = "1.0.1" - -const ( - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - base = 62 - preLen = 12 - seqLen = 10 - maxSeq = int64(839299365868340224) // base^seqLen == 62^10 - minInc = int64(33) - maxInc = int64(333) - totalLen = preLen + seqLen -) - -type NUID struct { - pre []byte - seq int64 - inc int64 -} - -type lockedNUID struct { - sync.Mutex - *NUID -} - -// Global NUID -var globalNUID *lockedNUID - -// Seed sequential random with crypto or math/random and current time -// and generate crypto prefix. -func init() { - r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - prand.Seed(time.Now().UnixNano()) - } else { - prand.Seed(r.Int64()) - } - globalNUID = &lockedNUID{NUID: New()} - globalNUID.RandomizePrefix() -} - -// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment. -func New() *NUID { - n := &NUID{ - seq: prand.Int63n(maxSeq), - inc: minInc + prand.Int63n(maxInc-minInc), - pre: make([]byte, preLen), - } - n.RandomizePrefix() - return n -} - -// Generate the next NUID string from the global locked NUID instance. -func Next() string { - globalNUID.Lock() - nuid := globalNUID.Next() - globalNUID.Unlock() - return nuid -} - -// Generate the next NUID string. -func (n *NUID) Next() string { - // Increment and capture. 
- n.seq += n.inc - if n.seq >= maxSeq { - n.RandomizePrefix() - n.resetSequential() - } - seq := n.seq - - // Copy prefix - var b [totalLen]byte - bs := b[:preLen] - copy(bs, n.pre) - - // copy in the seq in base62. - for i, l := len(b), seq; i > preLen; l /= base { - i -= 1 - b[i] = digits[l%base] - } - return string(b[:]) -} - -// Resets the sequential portion of the NUID. -func (n *NUID) resetSequential() { - n.seq = prand.Int63n(maxSeq) - n.inc = minInc + prand.Int63n(maxInc-minInc) -} - -// Generate a new prefix from crypto/rand. -// This call *can* drain entropy and will be called automatically when we exhaust the sequential range. -// Will panic if it gets an error from rand.Int() -func (n *NUID) RandomizePrefix() { - var cb [preLen]byte - cbs := cb[:] - if nb, err := rand.Read(cbs); nb != preLen || err != nil { - panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err)) - } - - for i := 0; i < preLen; i++ { - n.pre[i] = digits[int(cbs[i])%base] - } -} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b588347..00000000 --- a/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore deleted file mode 100644 index e6ba63a5..00000000 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 98b9893d..00000000 --- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! 
We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcomed! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! - -The documentation is present in the [README][readme] and thorough the -source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and other track them down and -fix by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. 
- -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied with regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When creating a pull requests, all tests will be ran on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. 
Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. - -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb016..00000000 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... 
- -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE deleted file mode 100644 index f414553c..00000000 --- a/vendor/github.com/pelletier/go-toml/LICENSE +++ /dev/null @@ -1,247 +0,0 @@ -The bulk of github.com/pelletier/go-toml is distributed under the MIT license -(see below), with the exception of localtime.go and localtime.test.go. -Those two files have been copied over from Google's civil library at revision -ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache -2.0 license (see below). - - -github.com/pelletier/go-toml: - - -The MIT License (MIT) - -Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -localtime.go, localtime_test.go: - -Originals: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go -Changes: - * Renamed files from civil* to localtime*. - * Package changed from civil to toml. - * 'Local' prefix added to all structs. -License: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503ae..00000000 --- a/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a..00000000 --- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). 
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 7399e04b..00000000 --- a/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# go-toml - -Go library for the [TOML](https://toml.io/) format. - -This library supports TOML version -[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) - -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - - -## Development status - -**ℹ️ Consider go-toml v2!** - -The next version of go-toml is in [active development][v2-dev], and -[nearing completion][v2-map]. - -Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], -and [much faster][v2-bench]. If you only need reading and writing TOML documents -(majority of cases), those features are implemented and the API unlikely to -change. - -The remaining features will be added shortly. While pull-requests are welcome on -v1, no active development is expected on it. When v2.0.0 is released, v1 will be -deprecated. 
- -👉 [go-toml v2][v2] - -[v2]: https://github.com/pelletier/go-toml/tree/v2 -[v2-map]: https://github.com/pelletier/go-toml/discussions/506 -[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 -[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed -[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at -[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). 
- -## Tools - -Go-toml provides three handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also available as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . -``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! - -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). 
diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md deleted file mode 100644 index b2f21cfc..00000000 --- a/vendor/github.com/pelletier/go-toml/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -Use this section to tell people about which versions of your project are -currently being supported with security updates. - -| Version | Supported | -| ---------- | ------------------ | -| Latest 2.x | :white_check_mark: | -| All 1.x | :x: | -| All 0.x | :x: | - -## Reporting a Vulnerability - -Email a vulnerability report to `security@pelletier.codes`. Make sure to include -as many details as possible to reproduce the vulnerability. This is a -side-project: I will try to get back to you as quickly as possible, time -permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index 4af198b4..00000000 --- a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,188 +0,0 @@ -trigger: -- master - -stages: -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' 
- - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.16: - goVersion: '1.16' - imageName: 'ubuntu-latest' - mac 1.16: - goVersion: '1.16' - imageName: 'macOS-latest' - windows 1.16: - goVersion: '1.16' - imageName: 'windows-latest' - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' 
-- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.16 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' 
- addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040..00000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! `hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. 
-benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32..00000000 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index 780d9c68..00000000 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. 
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b..00000000 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. 
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c4..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b..00000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. 
-// The input supports double quotation and single quotation, -// but escape sequences are not supported. Lexers must unescape them beforehand. -func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' { - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' 
{ - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index 313908e3..00000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,1031 +0,0 @@ -// TOML lexer. -// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: 
t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && 
l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - } - - if next == eof { - l.next() - break - } - - if next == '+' || next == '-' { - return l.lexNumber - } - - if isDigit(next) { - return l.lexDateTimeOrNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { - // Could be either a date/time, or a digit. - // The options for date/times are: - // YYYY-... => date or date-time - // HH:... => time - // Anything else should be a number. - - lookAhead := l.peekString(5) - if len(lookAhead) < 3 { - return l.lexNumber() - } - - for idx, r := range lookAhead { - if !isDigit(r) { - if idx == 2 && r == ':' { - return l.lexDateTimeOrTime() - } - if idx == 4 && r == '-' { - return l.lexDateTimeOrTime() - } - return l.lexNumber() - } - } - return l.lexNumber() -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 
1979-05-27 - // 07:32:00 - // 00:32:00.999999 - - // we already know those two are digits - l.next() - l.next() - - // Got 2 digits. At that point it could be either a time or a date(-time). - - r := l.next() - if r == ':' { - return l.lexTime() - } - - return l.lexDateTime() -} - -func (l *tomlLexer) lexDateTime() tomlLexStateFn { - // This state accepts an offset date-time, a local date-time, or a local date. - // - // v--- cursor - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - - // date - - // already checked by lexRvalue - l.next() // digit - l.next() // - - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid month digit in date: %c", r) - } - } - - r := l.next() - if r != '-' { - return l.errorf("expected - to separate month of a date, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid day digit in date: %c", r) - } - } - - l.emit(tokenLocalDate) - - r = l.peek() - - if r == eof { - - return l.lexRvalue - } - - if r != ' ' && r != 'T' { - return l.errorf("incorrect date/time separation character: %c", r) - } - - if r == ' ' { - lookAhead := l.peekString(3)[1:] - if len(lookAhead) < 2 { - return l.lexRvalue - } - for _, r := range lookAhead { - if !isDigit(r) { - return l.lexRvalue - } - } - } - - l.skip() // skip the T or ' ' - - // time - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r 
= l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' { - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - - return l.lexTimeOffset - -} - -func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { - // potential offset - - // Z - // -07:00 - // +07:00 - // nothing - - r := l.peek() - - if r == 'Z' { - l.next() - l.emit(tokenTimeOffset) - } else if r == '+' || r == '-' { - l.next() - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time offset: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time offset hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time offset: %c", r) - } - } - - l.emit(tokenTimeOffset) - } - - return l.lexRvalue -} - -func (l *tomlLexer) lexTime() tomlLexStateFn { - // v--- cursor - // 07:32:00 - // 00:32:00.999999 - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r := l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' 
{ - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - return l.lexRvalue - -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. 
-func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped 
control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table 
key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' 
{ - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index 9dfe4b9e..00000000 --- a/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,287 +0,0 @@ -// Implementation of TOML's local date/time. -// -// Copied over from Google's civil to avoid pulling all the Google dependencies. -// Originals: -// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go -// Changes: -// * Renamed files from civil* to localtime*. -// * Package changed from civil to toml. -// * 'Local' prefix added to all structs. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. 
-func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.LocalDate, even when time.LocalDate returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. -func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. 
-func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). -func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. 
-func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalDate. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then -// both -// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.LocalDateTime{ -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, -// civil.LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 57127304..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1308 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagLiteral = "literal" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - literal bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - literal string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - literal: tagLiteral, - defaultValue: tagDefault, -} - -type MarshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical MarshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. 
- OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: 
- return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the 
interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). - -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-uint64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.LocalTime time.LocalTime{}, pointers to same - -For additional flexibility, use the Encoder API. -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. 
-type Encoder struct { - w io.Writer - encOpts - annotation - line int - col int - order MarshalOrder - promoteAnon bool - compactComments bool - indentation string -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, - indentation: " ", - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. -// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -// Order allows to change in which order fields will be written to the output stream. -func (e *Encoder) Order(ord MarshalOrder) *Encoder { - e.order = ord - return e -} - -// Indentation allows to change indentation when marshalling. 
-func (e *Encoder) Indentation(indent string) *Encoder { - e.indentation = indent - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - -// PromoteAnonymous allows to change how anonymous struct fields are marshaled. -// Usually, they are marshaled as if the inner exported fields were fields in -// the outer struct. However, if an anonymous struct field is given a name in -// its TOML tag, it is treated like a regular struct field with that name. -// rather than being anonymous. -// -// In case anonymous promotion is enabled, all anonymous structs are promoted -// and treated like regular struct fields. -func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { - e.promoteAnon = promote - return e -} - -// CompactComments removes the new line before each comment in the tree. 
-func (e *Encoder) CompactComments(cc bool) *Encoder { - e.compactComments = cc - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - // Check if indentation is valid - for _, char := range e.indentation { - if !isSpace(char) { - return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") - } - } - - mtype := reflect.TypeOf(v) - if mtype == nil { - return []byte{}, errors.New("nil cannot be marshaled to TOML") - } - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - if reflect.ValueOf(v).IsNil() { - return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") - } - - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - if isTextMarshaler(mtype) { - return callTextMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) - - return buf.Bytes(), err -} - -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := e.nextTree() - switch mtype.Kind() { - case reflect.Struct: - switch mval.Interface().(type) { - case Tree: - reflect.ValueOf(tval).Elem().Set(mval) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := 
tomlOptions(mtypef, e.annotation) - if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { - e.appendTree(tval, tree) - } else { - val = e.wrapTomlValue(val, tval) - tval.SetPathWithOptions([]string{opts.name}, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - Literal: opts.literal, - }, val) - } - } - } - } - case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straight forward. - // - // OrderPreserve will support deterministic results when string is used - // as the key to maps. - typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := 
e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): - return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil 
- case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } - return nil -} - -// Create a toml value with the current line number as the position line -func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { - _, isTree := val.(*Tree) - _, isTreeS := val.([]*Tree) - if isTree || isTreeS { - e.line++ - return val - } - - ret := &tomlValue{ - value: val, - position: Position{ - e.line, - parent.position.Col, - }, - } - e.line++ - return ret -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. 
-// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts - tagName string - strict bool - visitor visitorState -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - tagName: tagFieldName, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. -func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - -// Strict allows changing to strict decoding. Any fields that are found in the -// input data and do not have a corresponding struct member cause an error. 
-func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. 
- if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = 
strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - // Check if the provided number has a non-numeric extension. - var hasExtension bool - if len(opts.defaultValue) > 0 { - lastChar := opts.defaultValue[len(opts.defaultValue)-1] - if lastChar < '0' || lastChar > '9' { - hasExtension = true - } - } - // If the value is a time.Duration with extension, parse as duration. - // If the value is an int64 or a time.Duration without extension, parse as number. - if hasExtension && mvalf.Type().String() == "time.Duration" { - val, err = time.ParseDuration(opts.defaultValue) - } else { - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - } - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } 
- mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, 
fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - mvalPtr := reflect.New(mtype) - - // Check if pointer to value implements the 
Unmarshaler interface. - if isCustomUnmarshaler(mvalPtr.Type()) { - if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - // Check if pointer to value implements the encoding.TextUnmarshaler. - if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. 
- if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), 
fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return callTextUnmarshaler(mval, buf.Bytes()) -} - -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) - 
defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - commented: commented, - multiline: multiline, - literal: literal, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. 
-type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = 
[5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110b..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index b3726d0d..00000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,507 +0,0 @@ -// TOML Parser. 
- -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - 
p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) - } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - 
p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) - - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var errInvalidUnderscore = errors.New("invalid use of _ in number") - -func 
numberContainsInvalidUnderscore(value string) error { - // For large numbers, you may use underscores between digits to enhance - // readability. Each underscore must be surrounded by at least one digit on - // each side. - - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscore - } - } - hasBefore = isDigit(r) - } - return nil -} - -var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") - -func hexNumberContainsInvalidUnderscore(value string) error { - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscoreHex - } - } - hasBefore = isHexDigit(r) - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - base := 10 - s := cleanedVal - checkInvalidUnderscore := numberContainsInvalidUnderscore - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - checkInvalidUnderscore = hexNumberContainsInvalidUnderscore - base = 16 - case 'o': - base = 8 - case 'b': - base = 2 - default: - panic("invalid base") // the lexer should catch this first - } - s = cleanedVal[2:] - } - - err := checkInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - - var val interface{} - val, err = strconv.ParseInt(s, base, 64) - if err == 
nil { - return val - } - - if s[0] != '-' { - if val, err = strconv.ParseUint(s, base, 64); err == nil { - return val - } - } - p.raiseError(tok, "%s", err) - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalTime: - val, err := ParseLocalTime(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - // a local date may be followed by: - // * nothing: this is a local date - // * a local time: this is a local date-time - - next := p.peek() - if next == nil || next.typ != tokenLocalTime { - val, err := ParseLocalDate(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - localDate := tok - localTime := p.getToken() - - next = p.peek() - if next == nil || next.typ != tokenTimeOffset { - v := localDate.val + "T" + localTime.val - val, err := ParseLocalDateTime(v) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - offset := p.getToken() - - layout := time.RFC3339Nano - v := localDate.val + "T" + localTime.val + offset.val - val, err := time.ParseInLocation(layout, v, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - default: - panic(fmt.Errorf("unhandled token: %v", tok)) - } - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, 
"unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. 
- if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87..00000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. 
with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index b437fdd3..00000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenLocalDate - tokenLocalTime - tokenTimeOffset - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalTime", - "TimeOffset", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys start with the first character that isn't whitespace or [ 
and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 5541b941..00000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,533 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - literal bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. 
-func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. 
-// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. 
-// If keys is of length zero, the current tree position is set. -func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if 
val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool - Literal bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - v.literal = opts.Literal - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - 
commented: opts.Commented, - multiline: opts.Multiline, - literal: opts.Literal, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). 
-func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. 
-func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = fmt.Errorf("%s", r) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go deleted file mode 100644 index 4136b462..00000000 --- a/vendor/github.com/pelletier/go-toml/tomlpub.go +++ /dev/null @@ -1,71 +0,0 @@ -package toml - -// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
-type PubTOMLValue = tomlValue - -func (ptv *PubTOMLValue) Value() interface{} { - return ptv.value -} -func (ptv *PubTOMLValue) Comment() string { - return ptv.comment -} -func (ptv *PubTOMLValue) Commented() bool { - return ptv.commented -} -func (ptv *PubTOMLValue) Multiline() bool { - return ptv.multiline -} -func (ptv *PubTOMLValue) Position() Position { - return ptv.position -} - -func (ptv *PubTOMLValue) SetValue(v interface{}) { - ptv.value = v -} -func (ptv *PubTOMLValue) SetComment(s string) { - ptv.comment = s -} -func (ptv *PubTOMLValue) SetCommented(c bool) { - ptv.commented = c -} -func (ptv *PubTOMLValue) SetMultiline(m bool) { - ptv.multiline = m -} -func (ptv *PubTOMLValue) SetPosition(p Position) { - ptv.position = p -} - -// PubTree wrapping Tree in order to access all properties from outside. -type PubTree = Tree - -func (pt *PubTree) Values() map[string]interface{} { - return pt.values -} - -func (pt *PubTree) Comment() string { - return pt.comment -} - -func (pt *PubTree) Commented() bool { - return pt.commented -} - -func (pt *PubTree) Inline() bool { - return pt.inline -} - -func (pt *PubTree) SetValues(v map[string]interface{}) { - pt.values = v -} - -func (pt *PubTree) SetComment(c string) { - pt.comment = c -} - -func (pt *PubTree) SetCommented(c bool) { - pt.commented = c -} - -func (pt *PubTree) SetInline(i bool) { - pt.inline = i -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: 
reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. -// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // 
collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) 
- } - - simpleValue, err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index c9afbdab..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,552 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': 
- b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - if tv.literal { - b := strings.Builder{} - b.WriteString("'''\n") - b.Write([]byte(value)) - b.WriteString("\n'''") - return b.String(), nil - } else { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value 
:= range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // Prevent returning 0 for empty trees - line = int(^uint(0) >> 1) - // get lowest line number >= 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous 
implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." 
+ combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# 
" - } - repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - if !compactComments { - writtenBytesCountComment, errc := writeStrings(w, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. 
-func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. -// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = tomlValueToGo(node.value) - } - } - return result -} - -func tomlValueToGo(v interface{}) interface{} { - if tree, ok := v.(*Tree); ok { - return tree.ToMap() - } - - rv := reflect.ValueOf(v) - - if rv.Kind() != reflect.Slice { - return v - } - values := make([]interface{}, rv.Len()) - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - values[i] = tomlValueToGo(item) - } - return values -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go deleted file mode 100644 index fa326308..00000000 --- 
a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go +++ /dev/null @@ -1,6 +0,0 @@ -package toml - -// ValueStringRepresentation transforms an interface{} value into its toml string representation. -func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) -} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 90639781..0ca86a3d 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -22,7 +22,7 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffa..ca214060 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,7 +18,7 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "google.golang.org/protobuf/encoding/prototext" diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go deleted file mode 100644 index d2e98d42..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 -// and the extendable output function (XOF) BLAKE2Xb. -// -// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and -// produces digests of any size between 1 and 64 bytes. 
-// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf -// and for BLAKE2Xb see https://blake2.net/blake2x.pdf -// -// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). -// If you need a secret-key MAC (message authentication code), use the New512 -// function with a non-nil key. -// -// BLAKE2X is a construction to compute hash values larger than 64 bytes. It -// can produce hash values between 0 and 4 GiB. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - // The blocksize of BLAKE2b in bytes. - BlockSize = 128 - // The hash size of BLAKE2b-512 in bytes. - Size = 64 - // The hash size of BLAKE2b-384 in bytes. - Size384 = 48 - // The hash size of BLAKE2b-256 in bytes. - Size256 = 32 -) - -var ( - useAVX2 bool - useAVX bool - useSSE4 bool -) - -var ( - errKeySize = errors.New("blake2b: invalid key size") - errHashSize = errors.New("blake2b: invalid hash size") -) - -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Sum512 returns the BLAKE2b-512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var sum [Size]byte - checkSum(&sum, Size, data) - return sum -} - -// Sum384 returns the BLAKE2b-384 checksum of the data. -func Sum384(data []byte) [Size384]byte { - var sum [Size]byte - var sum384 [Size384]byte - checkSum(&sum, Size384, data) - copy(sum384[:], sum[:Size384]) - return sum384 -} - -// Sum256 returns the BLAKE2b-256 checksum of the data. -func Sum256(data []byte) [Size256]byte { - var sum [Size]byte - var sum256 [Size256]byte - checkSum(&sum, Size256, data) - copy(sum256[:], sum[:Size256]) - return sum256 -} - -// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. 
-func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } - -// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } - -// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } - -// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. -// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. -// The hash size can be a value between 1 and 64 but it is highly recommended to use -// values equal or greater than: -// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). -// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). -// When the key is nil, the returned hash.Hash implements BinaryMarshaler -// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
-func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } - -func newDigest(hashSize int, key []byte) (*digest, error) { - if hashSize < 1 || hashSize > Size { - return nil, errHashSize - } - if len(key) > Size { - return nil, errKeySize - } - d := &digest{ - size: hashSize, - keyLen: len(key), - } - copy(d.key[:], key) - d.Reset() - return d, nil -} - -func checkSum(sum *[Size]byte, hashSize int, data []byte) { - h := iv - h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) - var c [2]uint64 - - if length := len(data); length > BlockSize { - n := length &^ (BlockSize - 1) - if length == n { - n -= BlockSize - } - hashBlocks(&h, &c, 0, data[:n]) - data = data[n:] - } - - var block [BlockSize]byte - offset := copy(block[:], data) - remaining := uint64(BlockSize - offset) - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h[:(hashSize+7)/8] { - binary.LittleEndian.PutUint64(sum[8*i:], v) - } -} - -type digest struct { - h [8]uint64 - c [2]uint64 - size int - block [BlockSize]byte - offset int - - key [BlockSize]byte - keyLen int -} - -const ( - magic = "b2b" - marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 -) - -func (d *digest) MarshalBinary() ([]byte, error) { - if d.keyLen != 0 { - return nil, errors.New("crypto/blake2b: cannot marshal MACs") - } - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - for i := 0; i < 8; i++ { - b = appendUint64(b, d.h[i]) - } - b = appendUint64(b, d.c[0]) - b = appendUint64(b, d.c[1]) - // Maximum value for size is 64 - b = append(b, byte(d.size)) - b = append(b, d.block[:]...) 
- b = append(b, byte(d.offset)) - return b, nil -} - -func (d *digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("crypto/blake2b: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("crypto/blake2b: invalid hash state size") - } - b = b[len(magic):] - for i := 0; i < 8; i++ { - b, d.h[i] = consumeUint64(b) - } - b, d.c[0] = consumeUint64(b) - b, d.c[1] = consumeUint64(b) - d.size = int(b[0]) - b = b[1:] - copy(d.block[:], b[:BlockSize]) - b = b[BlockSize:] - d.offset = int(b[0]) - return nil -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Size() int { return d.size } - -func (d *digest) Reset() { - d.h = iv - d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) - d.offset, d.c[0], d.c[1] = 0, 0, 0 - if d.keyLen > 0 { - d.block = d.key - d.offset = BlockSize - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - - if d.offset > 0 { - remaining := BlockSize - d.offset - if n <= remaining { - d.offset += copy(d.block[d.offset:], p) - return - } - copy(d.block[d.offset:], p[:remaining]) - hashBlocks(&d.h, &d.c, 0, d.block[:]) - d.offset = 0 - p = p[remaining:] - } - - if length := len(p); length > BlockSize { - nn := length &^ (BlockSize - 1) - if length == nn { - nn -= BlockSize - } - hashBlocks(&d.h, &d.c, 0, p[:nn]) - p = p[nn:] - } - - if len(p) > 0 { - d.offset += copy(d.block[:], p) - } - - return -} - -func (d *digest) Sum(sum []byte) []byte { - var hash [Size]byte - d.finalize(&hash) - return append(sum, hash[:d.size]...) 
-} - -func (d *digest) finalize(hash *[Size]byte) { - var block [BlockSize]byte - copy(block[:], d.block[:d.offset]) - remaining := uint64(BlockSize - d.offset) - - c := d.c - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - h := d.h - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h { - binary.LittleEndian.PutUint64(hash[8*i:], v) - } -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.BigEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.BigEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := binary.BigEndian.Uint64(b) - return b[8:], x -} - -func consumeUint32(b []byte) ([]byte, uint32) { - x := binary.BigEndian.Uint32(b) - return b[4:], x -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go deleted file mode 100644 index 56bfaaa1..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useAVX2 = cpu.X86.HasAVX2 - useAVX = cpu.X86.HasAVX - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - switch { - case useAVX2: - hashBlocksAVX2(h, c, flag, blocks) - case useAVX: - hashBlocksAVX(h, c, flag, blocks) - case useSSE4: - hashBlocksSSE4(h, c, flag, blocks) - default: - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s deleted file mode 100644 index 4b9daa18..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego - -#include "textflag.h" - -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; 
BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE 
$0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - 
VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define 
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - -// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ 
blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 - VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) - -loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 - JGE noinc - INCQ R9 - MOVQ R9, 8(DX) - -noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ 
R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) - VZEROUPPER - - RET - -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, 
c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - 
VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) - -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define 
LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 - -// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 - VMOVDQA X0, X8 - VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 - VMOVDQU 16(AX), X11 - VMOVDQU 32(AX), X2 - VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - - VMOVDQA X10, X0 - VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - - VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() - VMOVDQA X12, 16(R10) - VMOVDQA X13, 32(R10) - VMOVDQA X14, 48(R10) - VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X12, 80(R10) - VMOVDQA X13, 96(R10) - VMOVDQA X14, 112(R10) - VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X12, 144(R10) - VMOVDQA X13, 160(R10) - 
VMOVDQA X14, 176(R10) - VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() - VMOVDQA X12, 208(R10) - VMOVDQA X13, 224(R10) - VMOVDQA X14, 240(R10) - VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, 
X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - VMOVDQU 32(AX), X14 - VMOVDQU 48(AX), X15 - VPXOR X0, X10, X10 - VPXOR X1, X11, X11 - VPXOR X2, X14, X14 - VPXOR X3, X15, X15 - VPXOR X4, X10, X10 - VPXOR X5, X11, X11 - VPXOR X6, X14, X2 - VPXOR X7, X15, X3 - VMOVDQU X2, 32(AX) - VMOVDQU X3, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER - - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 5fa1b328..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.7 && amd64 && gc && !purego -// +build !go1.7,amd64,gc,!purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s deleted file mode 100644 index ae75eb9a..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, 
t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - -// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 - MOVOU 16(AX), X15 - - MOVQ 
0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, 
X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go deleted file mode 100644 index 3168a8aa..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "math/bits" -) - -// the precomputed values for BLAKE2b -// there are 12 16-byte arrays - one for each round -// the entries are calculated from the sigma constants. 
-var precomputed = [12][16]byte{ - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, - {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, - {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, - {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, - {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, - {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, - {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, - {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, - {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second -} - -func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - var m [16]uint64 - c0, c1 := c[0], c[1] - - for i := 0; i < len(blocks); { - c0 += BlockSize - if c0 < BlockSize { - c1++ - } - - v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] - v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] - v12 ^= c0 - v13 ^= c1 - v14 ^= flag - - for j := range m { - m[j] = binary.LittleEndian.Uint64(blocks[i:]) - i += 8 - } - - for j := range precomputed { - s := &(precomputed[j]) - - v0 += m[s[0]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -32) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft64(v4, -24) - v1 += m[s[1]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -32) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -24) - v2 += m[s[2]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -32) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -24) - v3 += m[s[3]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -32) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -24) - - v0 += m[s[4]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -16) - v8 += v12 - v4 ^= v8 - v4 = 
bits.RotateLeft64(v4, -63) - v1 += m[s[5]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -16) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -63) - v2 += m[s[6]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -16) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -63) - v3 += m[s[7]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -16) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -63) - - v0 += m[s[8]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -32) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -24) - v1 += m[s[9]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -32) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -24) - v2 += m[s[10]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -32) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -24) - v3 += m[s[11]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -32) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -24) - - v0 += m[s[12]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -16) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -63) - v1 += m[s[13]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -16) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -63) - v2 += m[s[14]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -16) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -63) - v3 += m[s[15]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -16) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -63) - - } - - h[0] ^= v0 ^ v8 - h[1] ^= v1 ^ v9 - h[2] ^= v2 ^ v10 - h[3] ^= v3 ^ v11 - h[4] ^= v4 ^ v12 - h[5] ^= v5 ^ v13 - h[6] ^= v6 ^ v14 - h[7] ^= v7 ^ v15 - } - c[0], c[1] = c0, c1 -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go deleted file mode 100644 index b0137cdf..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package blake2b - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - hashBlocksGeneric(h, c, flag, blocks) -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go deleted file mode 100644 index 52c414db..00000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2x.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "errors" - "io" -) - -// XOF defines the interface to hash functions that -// support arbitrary-length output. -type XOF interface { - // Write absorbs more data into the hash's state. It panics if called - // after Read. - io.Writer - - // Read reads more output from the hash. It returns io.EOF if the limit - // has been reached. - io.Reader - - // Clone returns a copy of the XOF in its current state. - Clone() XOF - - // Reset resets the XOF to its initial state. - Reset() -} - -// OutputLengthUnknown can be used as the size argument to NewXOF to indicate -// the length of the output is not known in advance. -const OutputLengthUnknown = 0 - -// magicUnknownOutputLength is a magic value for the output size that indicates -// an unknown number of output bytes. -const magicUnknownOutputLength = (1 << 32) - 1 - -// maxOutputLength is the absolute maximum number of bytes to produce when the -// number of output bytes is unknown. -const maxOutputLength = (1 << 32) * 64 - -// NewXOF creates a new variable-output-length hash. The hash either produce a -// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes -// (size == OutputLengthUnknown). 
In the latter case, an absolute limit of -// 256GiB applies. -// -// A non-nil key turns the hash into a MAC. The key must between -// zero and 32 bytes long. -func NewXOF(size uint32, key []byte) (XOF, error) { - if len(key) > Size { - return nil, errKeySize - } - if size == magicUnknownOutputLength { - // 2^32-1 indicates an unknown number of bytes and thus isn't a - // valid length. - return nil, errors.New("blake2b: XOF length too large") - } - if size == OutputLengthUnknown { - size = magicUnknownOutputLength - } - x := &xof{ - d: digest{ - size: Size, - keyLen: len(key), - }, - length: size, - } - copy(x.d.key[:], key) - x.Reset() - return x, nil -} - -type xof struct { - d digest - length uint32 - remaining uint64 - cfg, root, block [Size]byte - offset int - nodeOffset uint32 - readMode bool -} - -func (x *xof) Write(p []byte) (n int, err error) { - if x.readMode { - panic("blake2b: write to XOF after read") - } - return x.d.Write(p) -} - -func (x *xof) Clone() XOF { - clone := *x - return &clone -} - -func (x *xof) Reset() { - x.cfg[0] = byte(Size) - binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length - binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length - x.cfg[17] = byte(Size) // inner hash size - - x.d.Reset() - x.d.h[1] ^= uint64(x.length) << 32 - - x.remaining = uint64(x.length) - if x.remaining == magicUnknownOutputLength { - x.remaining = maxOutputLength - } - x.offset, x.nodeOffset = 0, 0 - x.readMode = false -} - -func (x *xof) Read(p []byte) (n int, err error) { - if !x.readMode { - x.d.finalize(&x.root) - x.readMode = true - } - - if x.remaining == 0 { - return 0, io.EOF - } - - n = len(p) - if uint64(n) > x.remaining { - n = int(x.remaining) - p = p[:n] - } - - if x.offset > 0 { - blockRemaining := Size - x.offset - if n < blockRemaining { - x.offset += copy(p, x.block[x.offset:]) - x.remaining -= uint64(n) - return - } - copy(p, x.block[x.offset:]) - p = p[blockRemaining:] - x.offset = 0 - x.remaining -= 
uint64(blockRemaining) - } - - for len(p) >= Size { - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - copy(p, x.block[:]) - p = p[Size:] - x.remaining -= uint64(Size) - } - - if todo := len(p); todo > 0 { - if x.remaining < uint64(Size) { - x.cfg[0] = byte(x.remaining) - } - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - x.offset = copy(p, x.block[:todo]) - x.remaining -= uint64(todo) - } - return -} - -func (d *digest) initConfig(cfg *[Size]byte) { - d.offset, d.c[0], d.c[1] = 0, 0, 0 - for i := range d.h { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go deleted file mode 100644 index 9d863396..00000000 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package blake2b - -import ( - "crypto" - "hash" -) - -func init() { - newHash256 := func() hash.Hash { - h, _ := New256(nil) - return h - } - newHash384 := func() hash.Hash { - h, _ := New384(nil) - return h - } - - newHash512 := func() hash.Hash { - h, _ := New512(nil) - return h - } - - crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) - crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) - crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 00f963ea..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -// -// Starting in Go 1.20, this package is a wrapper for the X25519 implementation -// in the crypto/ecdh package. -package curve25519 // import "golang.org/x/crypto/curve25519" - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. -func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - scalarBaseMult(dst, scalar) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. -var Basepoint []byte - -var basePoint = [32]byte{9} - -func init() { Basepoint = basePoint[:] } - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. -// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. 
-func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. - var dst [32]byte - return x25519(&dst, scalar, point) -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go b/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go deleted file mode 100644 index ba647e8d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package curve25519 - -import ( - "crypto/subtle" - "errors" - "strconv" - - "golang.org/x/crypto/curve25519/internal/field" -) - -func scalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -func scalarBaseMult(dst, scalar *[32]byte) { - checkBasepoint() - scalarMult(dst, scalar, &basePoint) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := 
len(scalar); l != 32 { - return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") - } - if l := len(point); l != 32 { - return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - scalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - scalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, errors.New("bad input point: low order point") - } - } - return dst[:], nil -} - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go b/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go deleted file mode 100644 index 627df497..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.20 - -package curve25519 - -import "crypto/ecdh" - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - curve := ecdh.X25519() - pub, err := curve.NewPublicKey(point) - if err != nil { - return nil, err - } - priv, err := curve.NewPrivateKey(scalar) - if err != nil { - return nil, err - } - out, err := priv.ECDH(pub) - if err != nil { - return nil, err - } - copy(dst[:], out) - return dst[:], nil -} - -func scalarMult(dst, scalar, point *[32]byte) { - if _, err := x25519(dst, scalar[:], point[:]); err != nil { - // The only error condition for x25519 when the inputs are 32 bytes long - // is if the output would have been the all-zero value. - for i := range dst { - dst[i] = 0 - } - } -} - -func scalarBaseMult(dst, scalar *[32]byte) { - curve := ecdh.X25519() - priv, err := curve.NewPrivateKey(scalar[:]) - if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") - } - copy(dst[:], priv.PublicKey().Bytes()) -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's - // effectively applying the reduction identity to the carry. 
- v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, which must be a 32-byte little-endian encoding. -// -// Consistent with RFC 7748, the most significant bit (the high bit of the -// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) -// are accepted. Note that this is laxer than specified by RFC 8032. 
-func (v *Element) SetBytes(x []byte) *Element { - if len(x) != 32 { - panic("edwards25519: invalid field element input size") - } - - // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). - v.l0 = binary.LittleEndian.Uint64(x[0:8]) - v.l0 &= maskLow51Bits - // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). - v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 - v.l1 &= maskLow51Bits - // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). - v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 - v.l2 &= maskLow51Bits - // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). - v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 - v.l3 &= maskLow51Bits - // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). - // Note: not bytes 25:33, shift 4, to avoid overread. - v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 - v.l4 &= maskLow51Bits - - return v -} - -// Bytes returns the canonical 32-byte little-endian encoding of v. -func (v *Element) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var out [32]byte - return v.bytes(&out) -} - -func (v *Element) bytes(out *[32]byte) []byte { - t := *v - t.reduce() - - var buf [8]byte - for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { - bitsOffset := i * 51 - binary.LittleEndian.PutUint64(buf[:], l<= len(out) { - break - } - out[off] |= bb - } - } - - return out[:] -} - -// Equal returns 1 if v and u are equal, and 0 otherwise. -func (v *Element) Equal(u *Element) int { - sa, sv := u.Bytes(), v.Bytes() - return subtle.ConstantTimeCompare(sa, sv) -} - -// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. -func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } - -// Select sets v to a if cond == 1, and to b if cond == 0. 
-func (v *Element) Select(a, b *Element, cond int) *Element { - m := mask64Bits(cond) - v.l0 = (m & a.l0) | (^m & b.l0) - v.l1 = (m & a.l1) | (^m & b.l1) - v.l2 = (m & a.l2) | (^m & b.l2) - v.l3 = (m & a.l3) | (^m & b.l3) - v.l4 = (m & a.l4) | (^m & b.l4) - return v -} - -// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. -func (v *Element) Swap(u *Element, cond int) { - m := mask64Bits(cond) - t := m & (v.l0 ^ u.l0) - v.l0 ^= t - u.l0 ^= t - t = m & (v.l1 ^ u.l1) - v.l1 ^= t - u.l1 ^= t - t = m & (v.l2 ^ u.l2) - v.l2 ^= t - u.l2 ^= t - t = m & (v.l3 ^ u.l3) - v.l3 ^= t - u.l3 ^= t - t = m & (v.l4 ^ u.l4) - v.l4 ^= t - u.l4 ^= t -} - -// IsNegative returns 1 if v is negative, and 0 otherwise. -func (v *Element) IsNegative() int { - return int(v.Bytes()[0] & 1) -} - -// Absolute sets v to |u|, and returns v. -func (v *Element) Absolute(u *Element) *Element { - return v.Select(new(Element).Negate(u), u, u.IsNegative()) -} - -// Multiply sets v = x * y, and returns v. -func (v *Element) Multiply(x, y *Element) *Element { - feMul(v, x, y) - return v -} - -// Square sets v = x * x, and returns v. -func (v *Element) Square(x *Element) *Element { - feSquare(v, x) - return v -} - -// Mult32 sets v = x * y, and returns v. -func (v *Element) Mult32(x *Element, y uint32) *Element { - x0lo, x0hi := mul51(x.l0, y) - x1lo, x1hi := mul51(x.l1, y) - x2lo, x2hi := mul51(x.l2, y) - x3lo, x3hi := mul51(x.l3, y) - x4lo, x4hi := mul51(x.l4, y) - v.l0 = x0lo + 19*x4hi // carried over per the reduction identity - v.l1 = x1lo + x0hi - v.l2 = x2lo + x1hi - v.l3 = x3lo + x2hi - v.l4 = x4lo + x3hi - // The hi portions are going to be only 32 bits, plus any previous excess, - // so we can skip the carry propagation. - return v -} - -// mul51 returns lo + hi * 2⁵¹ = a * b. 
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) { - mh, ml := bits.Mul64(a, uint64(b)) - lo = ml & maskLow51Bits - hi = (mh << 13) | (ml >> 51) - return -} - -// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. -func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. 
If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { - var a, b Element - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := a.Square(v) - uv3 := b.Multiply(u, b.Multiply(v2, v)) - uv7 := a.Multiply(uv3, a.Square(v2)) - r.Multiply(uv3, r.Pow22523(uv7)) - - check := a.Multiply(v, a.Square(r)) // check = v * r^2 - - uNeg := b.Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) - - rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(r) // Choose the nonnegative square root. - return r, correctSignSqrt | flippedSignSqrt -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go deleted file mode 100644 index edcf163c..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. 
-// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s deleted file mode 100644 index 293f013c..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // 
r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ (CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), 
AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - 
ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index ddb6c9b8..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index af459ef5..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 5c91e458..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index 234a5b2e..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !arm64 || !gc || purego -// +build !arm64 !gc purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go deleted file mode 100644 index 2671217d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. -func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. 
- // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, - // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. - // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. 
- - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2⁵¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. - // - // The largest coefficient (r0) will be at most 111 bits, which guarantees - // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. 
- // - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) - // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² - // r0 < 2⁷ × 2⁵² × 2⁵² - // r0 < 2¹¹¹ - // - // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most - // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and - // allows us to easily apply the reduction identity. - // - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - // r4 < 5 × 2⁵² × 2⁵² - // r4 < 2¹⁰⁷ - // - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - // Now all coefficients fit into 64-bit registers but are still too large to - // be passed around as a Element. We therefore do one last carry chain, - // where the carries will be small enough to fit in the wiggle room above 2⁵¹. - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -func feSquareGeneric(v, a *Element) { - l0 := a.l0 - l1 := a.l1 - l2 := a.l2 - l3 := a.l3 - l4 := a.l4 - - // Squaring works precisely like multiplication above, but thanks to its - // symmetry we get to group a few terms together. 
- // - // l4 l3 l2 l1 l0 x - // l4 l3 l2 l1 l0 = - // ------------------------ - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l4l1 l3l1 l2l1 l1l1 l0l1 + - // l4l2 l3l2 l2l2 l1l2 l0l2 + - // l4l3 l3l3 l2l3 l1l3 l0l3 + - // l4l4 l3l4 l2l4 l1l4 l0l4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l3l1 l2l1 l1l1 l0l1 19×l4l1 + - // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + - // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + - // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with - // only three Mul64 and four Add64, instead of five and eight. - - l0_2 := l0 * 2 - l1_2 := l1 * 2 - - l1_38 := l1 * 38 - l2_38 := l2 * 38 - l3_38 := l3 * 38 - - l3_19 := l3 * 19 - l4_19 := l4 * 19 - - // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) - r0 := mul64(l0, l0) - r0 = addMul64(r0, l1_38, l4) - r0 = addMul64(r0, l2_38, l3) - - // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 - r1 := mul64(l0_2, l1) - r1 = addMul64(r1, l2_38, l4) - r1 = addMul64(r1, l3_19, l3) - - // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 - r2 := mul64(l0_2, l2) - r2 = addMul64(r2, l1, l1) - r2 = addMul64(r2, l3_38, l4) - - // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 - r3 := mul64(l0_2, l3) - r3 = addMul64(r3, l1_2, l2) - r3 = addMul64(r3, l4_19, l4) - - // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 - r4 := mul64(l0_2, l4) - r4 = addMul64(r4, l1_2, l3) - r4 = addMul64(r4, l2, l2) - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := 
r4.lo&maskLow51Bits + c3 - - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction -// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline -func (v *Element) carryPropagateGeneric() *Element { - c0 := v.l0 >> 51 - c1 := v.l1 >> 51 - c2 := v.l2 >> 51 - c3 := v.l3 >> 51 - c4 := v.l4 >> 51 - - v.l0 = v.l0&maskLow51Bits + c4*19 - v.l1 = v.l1&maskLow51Bits + c0 - v.l2 = v.l2&maskLow51Bits + c1 - v.l3 = v.l3&maskLow51Bits + c2 - v.l4 = v.l4&maskLow51Bits + c3 - - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint deleted file mode 100644 index e3685f95..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint +++ /dev/null @@ -1 +0,0 @@ -b0c49ae9f59d233526f8934262c5bbbe14d4358d diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh deleted file mode 100644 index 1ba22a8b..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -euo pipefail - -cd "$(git rev-parse --show-toplevel)" - -STD_PATH=src/crypto/ed25519/internal/edwards25519/field -LOCAL_PATH=curve25519/internal/field -LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint) - -git fetch https://go.googlesource.com/go master - -if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then - echo "No changes." -else - NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint) - echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..." 
- git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \ - git apply -3 --directory=$LOCAL_PATH -fi diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345..00000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. 
-// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go deleted file mode 100644 index 69c17f82..00000000 --- a/vendor/golang.org/x/crypto/internal/alias/alias.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego -// +build !purego - -// Package alias implements memory aliasing tests. -package alias - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. 
-func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go deleted file mode 100644 index 4775b0a4..00000000 --- a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -// Package alias implements memory aliasing tests. -package alias - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. 
Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index 45b5c966..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index ed52b341..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go deleted file mode 100644 index f184b67d..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego - -package poly1305 - -type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go deleted file mode 100644 index 4aaea810..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package poly1305 implements Poly1305 one-time message authentication code as -// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. -// -// Poly1305 is a fast, one-time authentication function. It is infeasible for an -// attacker to generate an authenticator for a message without the key. However, a -// key must only be used for a single message. 
Authenticating two different -// messages with the same key allows an attacker to forge authenticators for other -// messages with the same key. -// -// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -// used with a fixed key in order to generate one-time keys from an nonce. -// However, in this package AES isn't used and the one-time key is specified -// directly. -package poly1305 - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := New(key) - h.Write(m) - h.Sum(out[:0]) -} - -// Verify returns true if mac is a valid authenticator for m with the given key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} - -// New returns a new MAC computing an authentication -// tag of all data written to it with the given key. -// This allows writing the message progressively instead -// of passing it as a single slice. Common users should use -// the Sum function instead. -// -// The key must be unique for each message, as authenticating -// two different messages with the same key allows an attacker -// to forge messages at will. -func New(key *[32]byte) *MAC { - m := &MAC{} - initialize(key, &m.macState) - return m -} - -// MAC is an io.Writer computing an authentication tag -// of the data written to it. -// -// MAC cannot be used like common hash.Hash implementations, -// because using a poly1305 key twice breaks its security. -// Therefore writing data to a running MAC after calling -// Sum or Verify causes it to panic. 
-type MAC struct { - mac // platform-dependent implementation - - finalized bool -} - -// Size returns the number of bytes Sum will return. -func (h *MAC) Size() int { return TagSize } - -// Write adds more data to the running message authentication code. -// It never returns an error. -// -// It must not be called after the first call of Sum or Verify. -func (h *MAC) Write(p []byte) (n int, err error) { - if h.finalized { - panic("poly1305: write to MAC after Sum or Verify") - } - return h.mac.Write(p) -} - -// Sum computes the authenticator of all data written to the -// message authentication code. -func (h *MAC) Sum(b []byte) []byte { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return append(b, mac[:]...) -} - -// Verify returns whether the authenticator of all data written to -// the message authentication code matches the expected value. -func (h *MAC) Verify(expected []byte) bool { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return subtle.ConstantTimeCompare(expected, mac[:]) == 1 -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go deleted file mode 100644 index 6d522333..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s deleted file mode 100644 index 1d74f0f8..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVQ state+0(FP), DI - MOVQ msg_base+8(FP), SI - MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 - JB bytes_between_0_and_15 - -loop: - POLY1305_ADD(SI, R8, R9, R10) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop - -bytes_between_0_and_15: - TESTQ R15, R15 - JZ done - MOVQ $1, BX - XORQ CX, CX - XORQ R13, R13 - ADDQ R15, SI - -flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX - MOVB -1(SI), R13 - XORQ R13, BX - DECQ SI - DECQ R15 - JNZ flush_buffer - - ADDQ BX, R8 - ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 - JMP multiply - -done: - MOVQ R8, 0(DI) - MOVQ R9, 8(DI) - MOVQ R10, 16(DI) - RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go deleted file mode 100644 index e041da5e..00000000 --- 
a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides the generic implementation of Sum and MAC. Other files -// might provide optimized assembly implementations of some of this code. - -package poly1305 - -import "encoding/binary" - -// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag -// for a 64 bytes message is approximately -// -// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 -// -// for some secret r and s. It can be computed sequentially like -// -// for len(msg) > 0: -// h += read(msg, 16) -// h *= r -// h %= 2¹³⁰ - 5 -// return h + s -// -// All the complexity is about doing performant constant-time math on numbers -// larger than any available numeric type. - -func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMACGeneric(key) - h.Write(msg) - h.Sum(out) -} - -func newMACGeneric(key *[32]byte) macGeneric { - m := macGeneric{} - initialize(key, &m.macState) - return m -} - -// macState holds numbers in saturated 64-bit little-endian limbs. That is, -// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. -type macState struct { - // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but - // can grow larger during and after rounds. It must, however, remain below - // 2 * (2¹³⁰ - 5). - h [3]uint64 - // r and s are the private key components. - r [2]uint64 - s [2]uint64 -} - -type macGeneric struct { - macState - - buffer [TagSize]byte - offset int -} - -// Write splits the incoming message into TagSize chunks, and passes them to -// update. It buffers incomplete chunks. 
-func (h *macGeneric) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - updateGeneric(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - updateGeneric(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -// Sum flushes the last incomplete chunk from the buffer, if any, and generates -// the MAC output. It does not modify its state, in order to allow for multiple -// calls to Sum, even if no Write is allowed after Sum. -func (h *macGeneric) Sum(out *[TagSize]byte) { - state := h.macState - if h.offset > 0 { - updateGeneric(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} - -// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It -// clears some bits of the secret coefficient to make it possible to implement -// multiplication more efficiently. -const ( - rMask0 = 0x0FFFFFFC0FFFFFFF - rMask1 = 0x0FFFFFFC0FFFFFFC -) - -// initialize loads the 256-bit key into the two 128-bit secret values r and s. -func initialize(key *[32]byte, m *macState) { - m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 - m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 - m.s[0] = binary.LittleEndian.Uint64(key[16:24]) - m.s[1] = binary.LittleEndian.Uint64(key[24:32]) -} - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. 
-type uint128 struct { - lo, hi uint64 -} - -func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) - return uint128{lo, hi} -} - -func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) - if c != 0 { - panic("poly1305: unexpected overflow") - } - return uint128{lo, hi} -} - -func shiftRightBy2(a uint128) uint128 { - a.lo = a.lo>>2 | (a.hi&3)<<62 - a.hi = a.hi >> 2 - return a -} - -// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of -// 128 bits of message, it computes -// -// h₊ = (h + m) * r mod 2¹³⁰ - 5 -// -// If the msg length is not a multiple of TagSize, it assumes the last -// incomplete chunk is the final one. -func updateGeneric(state *macState, msg []byte) { - h0, h1, h2 := state.h[0], state.h[1], state.h[2] - r0, r1 := state.r[0], state.r[1] - - for len(msg) > 0 { - var c uint64 - - // For the first step, h + m, we use a chain of bits.Add64 intrinsics. - // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially - // reduced at the end of the multiplication below. - // - // The spec requires us to set a bit just above the message size, not to - // hide leading zeroes. For full chunks, that's 1 << 128, so we can just - // add 1 to the most significant (2¹²⁸) limb, h2. - if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) - h2 += c + 1 - - msg = msg[TagSize:] - } else { - var buf [TagSize]byte - copy(buf[:], msg) - buf[len(msg)] = 1 - - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) - h2 += c - - msg = nil - } - - // Multiplication of big number limbs is similar to elementary school - // columnar multiplication. Instead of digits, there are 64-bit limbs. - // - // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. 
- // - // h2 h1 h0 x - // r1 r0 = - // ---------------- - // h2r0 h1r0 h0r0 <-- individual 128-bit products - // + h2r1 h1r1 h0r1 - // ------------------------ - // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs - // ------------------------ - // m3.hi m2.hi m1.hi m0.hi <-- carry propagation - // + m3.lo m2.lo m1.lo m0.lo - // ------------------------------- - // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs - // - // The main difference from pen-and-paper multiplication is that we do - // carry propagation in a separate step, as if we wrote two digit sums - // at first (the 128-bit limbs), and then carried the tens all at once. - - h0r0 := mul64(h0, r0) - h1r0 := mul64(h1, r0) - h2r0 := mul64(h2, r0) - h0r1 := mul64(h0, r1) - h1r1 := mul64(h1, r1) - h2r1 := mul64(h2, r1) - - // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their - // top 4 bits cleared by rMask{0,1}, we know that their product is not going - // to overflow 64 bits, so we can ignore the high part of the products. - // - // This also means that the product doesn't have a fifth limb (t4). - if h2r0.hi != 0 { - panic("poly1305: unexpected overflow") - } - if h2r1.hi != 0 { - panic("poly1305: unexpected overflow") - } - - m0 := h0r0 - m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again - m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. - m3 := h2r1 - - t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) - - // Now we have the result as 4 64-bit limbs, and we need to reduce it - // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do - // a cheap partial reduction according to the reduction identity - // - // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 - // - // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. 
Partial reduction since the result is - // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the - // assumptions we make about h in the rest of the code. - // - // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 - - // We split the final result at the 2¹³⁰ mark into h and cc, the carry. - // Note that the carry bits are effectively shifted left by 2, in other - // words, cc = c * 4 for the c in the reduction identity. - h0, h1, h2 = t0, t1, t2&maskLow2Bits - cc := uint128{t2 & maskNotLow2Bits, t3} - - // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - cc = shiftRightBy2(cc) - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most - // - // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 - } - - state.h[0], state.h[1], state.h[2] = h0, h1, h2 -} - -const ( - maskLow2Bits uint64 = 0x0000000000000003 - maskNotLow2Bits uint64 = ^maskLow2Bits -) - -// select64 returns x if v == 1 and y if v == 0, in constant time. -func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } - -// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. -const ( - p0 = 0xFFFFFFFFFFFFFFFB - p1 = 0xFFFFFFFFFFFFFFFF - p2 = 0x0000000000000003 -) - -// finalize completes the modular reduction of h and computes -// -// out = h + s mod 2¹²⁸ -func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { - h0, h1, h2 := h[0], h[1], h[2] - - // After the partial reduction in updateGeneric, h might be more than - // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction - // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the - // result if the subtraction underflows, and t otherwise. 
- - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) - - // h = h if h < p else h - p - h0 = select64(b, h0, hMinusP0) - h1 = select64(b, h1, hMinusP1) - - // Finally, we compute the last Poly1305 step - // - // tag = h + s mod 2¹²⁸ - // - // by just doing a wide addition with the 128 low bits of h and discarding - // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) - - binary.LittleEndian.PutUint64(out[0:8], h0) - binary.LittleEndian.PutUint64(out[8:16], h1) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go deleted file mode 100644 index 4a069941..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s deleted file mode 100644 index 58422aad..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -// This was ported from the amd64 implementation. 
- -#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ - MOVD $1, t2; \ - ADDC t0, h0, h0; \ - ADDE t1, h1, h1; \ - ADDE t2, h2; \ - ADD $16, msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ - MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ - MULHDU r0, h0, t1; \ - MULHDU r0, h1, t5; \ - ADDC t4, t1, t1; \ - MULLD r0, h2, t2; \ - ADDZE t5; \ - MULHDU r1, h0, t4; \ - MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ - ADDC h0, t1, t1; \ - MULLD h2, r1, t3; \ - ADDZE t4, h0; \ - MULHDU r1, h1, t5; \ - MULLD r1, h1, t4; \ - ADDC t4, t2, t2; \ - ADDE t5, t3, t3; \ - ADDC h0, t2, t2; \ - MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ - ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ - ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ - SLD $62, t3, t4; \ - SRD $2, t2; \ - ADDZE h2; \ - OR t4, t2, t2; \ - SRD $2, t3; \ - ADDC t2, h0, h0; \ - ADDE t3, h1, h1; \ - ADDZE h2 - -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVD state+0(FP), R3 - MOVD msg_base+8(FP), R4 - MOVD msg_len+16(FP), R5 - - MOVD 0(R3), R8 // h0 - MOVD 8(R3), R9 // h1 - MOVD 16(R3), R10 // h2 - MOVD 24(R3), R11 // r0 - MOVD 32(R3), R12 // r1 - - CMP R5, $16 - BLT bytes_between_0_and_15 - -loop: - POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) - ADD $-16, R5 - CMP R5, $16 - BGE loop - -bytes_between_0_and_15: - CMP R5, $0 - BEQ done - MOVD $0, R16 // h0 - MOVD $0, R17 // h1 - -flush_buffer: - CMP R5, $8 - BLE just1 - - MOVD $8, R21 - SUB R21, R5, R21 - - // Greater than 8 -- load the rightmost remaining bytes in msg - // and put into R17 (h1) - MOVD (R4)(R21), R17 - MOVD $16, R22 - - // Find the offset to those bytes - SUB R5, R22, R22 - SLD $3, R22 - - // Shift to get only the bytes in msg - SRD R22, 
R17, R17 - - // Put 1 at high end - MOVD $1, R23 - SLD $3, R21 - SLD R21, R23, R23 - OR R23, R17, R17 - - // Remainder is 8 - MOVD $8, R5 - -just1: - CMP R5, $8 - BLT less8 - - // Exactly 8 - MOVD (R4), R16 - - CMP R17, $0 - - // Check if we've already set R17; if not - // set 1 to indicate end of msg. - BNE carry - MOVD $1, R17 - BR carry - -less8: - MOVD $0, R16 // h0 - MOVD $0, R22 // shift count - CMP R5, $4 - BLT less4 - MOVWZ (R4), R16 - ADD $4, R4 - ADD $-4, R5 - MOVD $32, R22 - -less4: - CMP R5, $2 - BLT less2 - MOVHZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $16, R22 - ADD $-2, R5 - ADD $2, R4 - -less2: - CMP R5, $0 - BEQ insert1 - MOVBZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $8, R22 - -insert1: - // Insert 1 at end of msg - MOVD $1, R21 - SLD R22, R21, R21 - OR R16, R21, R16 - -carry: - // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDZE R10, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply - -done: - // Save h0, h1, h2 in state - MOVD R8, 0(R3) - MOVD R9, 8(R3) - MOVD R10, 16(R3) - RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go deleted file mode 100644 index ec959668..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -import ( - "golang.org/x/sys/cpu" -) - -// updateVX is an assembly implementation of Poly1305 that uses vector -// instructions. It must only be called if the vector facility (vx) is -// available. 
-// -//go:noescape -func updateVX(state *macState, msg []byte) - -// mac is a replacement for macGeneric that uses a larger buffer and redirects -// calls that would have gone to updateGeneric to updateVX if the vector -// facility is installed. -// -// A larger buffer is required for good performance because the vector -// implementation has a higher fixed cost per call than the generic -// implementation. -type mac struct { - macState - - buffer [16 * TagSize]byte // size must be a multiple of block size (16) - offset int -} - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < len(h.buffer) { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - if cpu.S390X.HasVX { - updateVX(&h.macState, h.buffer[:]) - } else { - updateGeneric(&h.macState, h.buffer[:]) - } - } - - tail := len(p) % len(h.buffer) // number of bytes to copy into buffer - body := len(p) - tail // number of bytes to process now - if body > 0 { - if cpu.S390X.HasVX { - updateVX(&h.macState, p[:body]) - } else { - updateGeneric(&h.macState, p[:body]) - } - } - h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 - return nn, nil -} - -func (h *mac) Sum(out *[TagSize]byte) { - state := h.macState - remainder := h.buffer[:h.offset] - - // Use the generic implementation if we have 2 or fewer blocks left - // to sum. The vector implementation has a higher startup time. - if cpu.S390X.HasVX && len(remainder) > 2*TagSize { - updateVX(&state, remainder) - } else if len(remainder) > 0 { - updateGeneric(&state, remainder) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s deleted file mode 100644 index aa9e0494..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -// This implementation of Poly1305 uses the vector facility (vx) -// to process up to 2 blocks (32 bytes) per iteration using an -// algorithm based on the one described in: -// -// NEON crypto, Daniel J. Bernstein & Peter Schwabe -// https://cryptojedi.org/papers/neoncrypto-20120320.pdf -// -// This algorithm uses 5 26-bit limbs to represent a 130-bit -// value. These limbs are, for the most part, zero extended and -// placed into 64-bit vector register elements. Each vector -// register is 128-bits wide and so holds 2 of these elements. -// Using 26-bit limbs allows us plenty of headroom to accommodate -// accumulations before and after multiplication without -// overflowing either 32-bits (before multiplication) or 64-bits -// (after multiplication). -// -// In order to parallelise the operations required to calculate -// the sum we use two separate accumulators and then sum those -// in an extra final step. For compatibility with the generic -// implementation we perform this summation at the end of every -// updateVX call. -// -// To use two accumulators we must multiply the message blocks -// by r² rather than r. Only the final message block should be -// multiplied by r. -// -// Example: -// -// We want to calculate the sum (h) for a 64 byte message (m): -// -// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r -// -// To do this we split the calculation into the even indices -// and odd indices of the message. 
These form our SIMD 'lanes': -// -// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 -// m[16:32]r³ + m[48:64]r <- lane 1 -// -// To calculate this iteratively we refactor so that both lanes -// are written in terms of r² and r: -// -// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 -// (m[16:32]r² + m[48:64])r <- lane 1 -// ^ ^ -// | coefficients for second iteration -// coefficients for first iteration -// -// So in this case we would have two iterations. In the first -// both lanes are multiplied by r². In the second only the -// first lane is multiplied by r² and the second lane is -// instead multiplied by r. This gives use the odd and even -// powers of r that we need from the original equation. -// -// Notation: -// -// h - accumulator -// r - key -// m - message -// -// [a, b] - SIMD register holding two 64-bit values -// [a, b, c, d] - SIMD register holding four 32-bit values -// xᵢ[n] - limb n of variable x with bit width i -// -// Limbs are expressed in little endian order, so for 26-bit -// limbs x₂₆[4] will be the most significant limb and x₂₆[0] -// will be the least significant limb. 
- -// masking constants -#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits -#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits - -// expansion constants (see EXPAND macro) -#define EX0 V2 -#define EX1 V3 -#define EX2 V4 - -// key (r², r or 1 depending on context) -#define R_0 V5 -#define R_1 V6 -#define R_2 V7 -#define R_3 V8 -#define R_4 V9 - -// precalculated coefficients (5r², 5r or 0 depending on context) -#define R5_1 V10 -#define R5_2 V11 -#define R5_3 V12 -#define R5_4 V13 - -// message block (m) -#define M_0 V14 -#define M_1 V15 -#define M_2 V16 -#define M_3 V17 -#define M_4 V18 - -// accumulator (h) -#define H_0 V19 -#define H_1 V20 -#define H_2 V21 -#define H_3 V22 -#define H_4 V23 - -// temporary registers (for short-lived values) -#define T_0 V24 -#define T_1 V25 -#define T_2 V26 -#define T_3 V27 -#define T_4 V28 - -GLOBL ·constants<>(SB), RODATA, $0x30 -// EX0 -DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 -DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 -// EX1 -DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 -// EX2 -DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d - -// MULTIPLY multiplies each lane of f and g, partially reduced -// modulo 2¹³⁰ - 5. The result, h, consists of partial products -// in each lane that need to be reduced further to produce the -// final result. -// -// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ -// -// Note that the multiplication by 5 of the high bits is -// achieved by precalculating the multiplication of four of the -// g coefficients by 5. These are g51-g54. 
-#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ - VMLOF f0, g0, h0 \ - VMLOF f0, g3, h3 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g4, h4 \ - VMLOF f0, g2, h2 \ - VMLOF f1, g54, T_0 \ - VMLOF f1, g2, T_3 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g3, T_4 \ - VMLOF f1, g1, T_2 \ - VMALOF f2, g53, h0, h0 \ - VMALOF f2, g1, h3, h3 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g2, h4, h4 \ - VMALOF f2, g0, h2, h2 \ - VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g0, T_3, T_3 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g1, T_4, T_4 \ - VMALOF f3, g54, T_2, T_2 \ - VMALOF f4, g51, h0, h0 \ - VMALOF f4, g54, h3, h3 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g0, h4, h4 \ - VMALOF f4, g53, h2, h2 \ - VAG T_0, h0, h0 \ - VAG T_3, h3, h3 \ - VAG T_1, h1, h1 \ - VAG T_4, h4, h4 \ - VAG T_2, h2, h2 - -// REDUCE performs the following carry operations in four -// stages, as specified in Bernstein & Schwabe: -// -// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] -// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] -// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] -// 4: h₂₆[3]->h₂₆[4] -// -// The result is that all of the limbs are limited to 26-bits -// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. -// -// Note that although each limb is aligned at 26-bit intervals -// they may contain values that exceed 2²⁶ - 1, hence the need -// to carry the excess bits in each limb. 
-#define REDUCE(h0, h1, h2, h3, h4) \ - VESRLG $26, h0, T_0 \ - VESRLG $26, h3, T_1 \ - VN MOD26, h0, h0 \ - VN MOD26, h3, h3 \ - VAG T_0, h1, h1 \ - VAG T_1, h4, h4 \ - VESRLG $26, h1, T_2 \ - VESRLG $26, h4, T_3 \ - VN MOD26, h1, h1 \ - VN MOD26, h4, h4 \ - VESLG $2, T_3, T_4 \ - VAG T_3, T_4, T_4 \ - VAG T_2, h2, h2 \ - VAG T_4, h0, h0 \ - VESRLG $26, h2, T_0 \ - VESRLG $26, h0, T_1 \ - VN MOD26, h2, h2 \ - VN MOD26, h0, h0 \ - VAG T_0, h3, h3 \ - VAG T_1, h1, h1 \ - VESRLG $26, h3, T_2 \ - VN MOD26, h3, h3 \ - VAG T_2, h4, h4 - -// EXPAND splits the 128-bit little-endian values in0 and in1 -// into 26-bit big-endian limbs and places the results into -// the first and second lane of d₂₆[0:4] respectively. -// -// The EX0, EX1 and EX2 constants are arrays of byte indices -// for permutation. The permutation both reverses the bytes -// in the input and ensures the bytes are copied into the -// destination limb ready to be shifted into their final -// position. -#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VPERM in0, in1, EX0, d0 \ - VPERM in0, in1, EX1, d2 \ - VPERM in0, in1, EX2, d4 \ - VESRLG $26, d0, d1 \ - VESRLG $30, d2, d3 \ - VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] - VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] - VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] - VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] - VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] - -// func updateVX(state *macState, msg []byte) -TEXT ·updateVX(SB), NOSPLIT, $0 - MOVD state+0(FP), R1 - LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // generate masks - VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] - VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] - - // load h (accumulator) and r (key) from state - VZERO T_1 // [0, 0] - VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] - VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] - VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] - VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] - 
VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] - - // unpack h and r into 26-bit limbs - // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value - VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] - VZERO H_1 // [0, 0] - VZERO H_3 // [0, 0] - VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out - VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] - VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] - VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only - VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] - VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only - VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete - VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete - - // replicate r across all 4 vector elements - VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] - VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] - VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] - VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] - VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] - - // zero out lane 1 of h - VLEIG $1, $0, H_0 // [h₂₆[0], 0] - VLEIG $1, $0, H_1 // [h₂₆[1], 0] - VLEIG $1, $0, H_2 // [h₂₆[2], 0] - VLEIG $1, $0, H_3 // [h₂₆[3], 0] - VLEIG $1, $0, H_4 // [h₂₆[4], 0] - - // calculate 5r (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] - VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] - - // skip r² calculation if we are only calculating one block - CMPBLE R3, $16, skip - - // calculate r² - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) - REDUCE(M_0, M_1, M_2, M_3, M_4) - VGBM $0x0f0f, T_0 - VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] - VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] - VERIMG 
$0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] - VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] - VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] - - // calculate 5r² (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] - VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] - -loop: - CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients - - // load next 2 blocks from message - VLM (R2), T_0, T_1 - - // update message slice - SUB $32, R3 - MOVD $32(R2), R2 - - // unpack message blocks into 26-bit big-endian limbs - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // add 2¹²⁸ to each message block value - VLEIB $4, $1, M_4 - VLEIB $12, $1, M_4 - -multiply: - // accumulate the incoming message - VAG H_0, M_0, M_0 - VAG H_3, M_3, M_3 - VAG H_1, M_1, M_1 - VAG H_4, M_4, M_4 - VAG H_2, M_2, M_2 - - // multiply the accumulator by the key coefficient - MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) - - // carry and partially reduce the partial products - REDUCE(H_0, H_1, H_2, H_3, H_4) - - CMPBNE R3, $0, loop - -finish: - // sum lane 0 and lane 1 and put the result in lane 1 - VZERO T_0 - VSUMQG H_0, T_0, H_0 - VSUMQG H_3, T_0, H_3 - VSUMQG H_1, T_0, H_1 - VSUMQG H_4, T_0, H_4 - VSUMQG H_2, T_0, H_2 - - // reduce again after summation - // TODO(mundaym): there might be a more efficient way to do this - // now that we only have 1 active lane. For example, we could - // simultaneously pack the values as we reduce them. - REDUCE(H_0, H_1, H_2, H_3, H_4) - - // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 - // TODO(mundaym): in testing this final carry was unnecessary. - // Needs a proof before it can be removed though. 
- VESRLG $26, H_1, T_1 - VN MOD26, H_1, H_1 - VAQ T_1, H_2, H_2 - VESRLG $26, H_2, T_2 - VN MOD26, H_2, H_2 - VAQ T_2, H_3, H_3 - VESRLG $26, H_3, T_3 - VN MOD26, H_3, H_3 - VAQ T_3, H_4, H_4 - - // h is now < 2(2¹³⁰ - 5) - // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. - VESLG $26, H_1, H_1 - VESLG $26, H_3, H_3 - VO H_0, H_1, H_0 - VO H_2, H_3, H_2 - VESLG $4, H_2, H_2 - VLEIB $7, $48, H_1 - VSLB H_1, H_2, H_2 - VO H_0, H_2, H_0 - VLEIB $7, $104, H_1 - VSLB H_1, H_4, H_3 - VO H_3, H_0, H_0 - VLEIB $7, $24, H_1 - VSRLB H_1, H_4, H_1 - - // update state - VSTEG $1, H_0, 0(R1) - VSTEG $0, H_0, 8(R1) - VSTEG $1, H_1, 16(R1) - RET - -b2: // 2 or fewer blocks remaining - CMPBLE R3, $16, b1 - - // Load the 2 remaining blocks (17-32 bytes remaining). - MOVD $-17(R3), R0 // index of final byte to load modulo 16 - VL (R2), T_0 // load full 16 byte block - VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) - CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long - VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 - - // Split both blocks into 26-bit limbs in the appropriate lanes. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the second to last block. - VLEIB $4, $1, M_4 - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, M_4 - - // Finally, set up the coefficients for the final multiplication. 
- // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r² so that can be kept the - // same. We want lane 1 to be multiplied by r so we need to move - // the saved r value into the 32-bit odd index in lane 1 by - // rotating the 64-bit lane by 32. - VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only - VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] - VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] - VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] - VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] - VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] - VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] - VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] - VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] - VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] - - MOVD $0, R3 - BR multiply - -skip: - CMPBEQ R3, $0, finish - -b1: // 1 block remaining - - // Load the final block (1-16 bytes). This will be placed into - // lane 0. - MOVD $-1(R3), R0 - VLL R0, (R2), T_0 // pad to 16 bytes with zeros - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_0 - - // Set the message block in lane 1 to the value 0 so that it - // can be accumulated without affecting the final result. - VZERO T_1 - - // Split the final message block into 26-bit limbs in lane 0. - // Lane 1 will be contain 0. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. 
- CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, M_4 - - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r so we need to move the - // saved r value into the 32-bit odd index in lane 0. We want - // lane 1 to be set to the value 1. This makes multiplication - // a no-op. We do this by setting lane 1 in every register to 0 - // and then just setting the 32-bit index 3 in R_0 to 1. - VZERO T_0 - MOVD $0, R0 - MOVD $0x10111213, R12 - VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] - VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] - VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] - VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] - VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] - VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] - VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] - VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] - VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] - VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] - - // Set the value of lane 1 to be 1. - VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] - - MOVD $0, R3 - BR multiply diff --git a/vendor/golang.org/x/crypto/nacl/box/box.go b/vendor/golang.org/x/crypto/nacl/box/box.go deleted file mode 100644 index 7f3b830e..00000000 --- a/vendor/golang.org/x/crypto/nacl/box/box.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package box authenticates and encrypts small messages using public-key cryptography. - -Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate -messages. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. 
Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -Messages should be small because: - -1. The whole message needs to be held in memory to be processed. - -2. Using large messages pressures implementations on small machines to decrypt -and process plaintext before authenticating it. This is very dangerous, and -this API does not allow it, but a protocol that uses excessive message sizes -might present some implementations with no other choice. - -3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. - -4. Performance may be improved by working with messages that fit into data caches. - -Thus large amounts of data should be chunked so that each message is small. -(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable -chunk size. - -This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html. -Anonymous sealing/opening is an extension of NaCl defined by and interoperable -with libsodium: -https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes. -*/ -package box // import "golang.org/x/crypto/nacl/box" - -import ( - cryptorand "crypto/rand" - "io" - - "golang.org/x/crypto/blake2b" - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/salsa20/salsa" -) - -const ( - // Overhead is the number of bytes of overhead when boxing a message. - Overhead = secretbox.Overhead - - // AnonymousOverhead is the number of bytes of overhead when using anonymous - // sealed boxes. - AnonymousOverhead = Overhead + 32 -) - -// GenerateKey generates a new public/private key pair suitable for use with -// Seal and Open. 
-func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) { - publicKey = new([32]byte) - privateKey = new([32]byte) - _, err = io.ReadFull(rand, privateKey[:]) - if err != nil { - publicKey = nil - privateKey = nil - return - } - - curve25519.ScalarBaseMult(publicKey, privateKey) - return -} - -var zeros [16]byte - -// Precompute calculates the shared key between peersPublicKey and privateKey -// and writes it to sharedKey. The shared key can be used with -// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing -// when using the same pair of keys repeatedly. -func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) { - curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey) - salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma) -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// will be Overhead bytes longer than the original and must not overlap it. The -// nonce must be unique for each distinct message for a given pair of keys. -func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte { - var sharedKey [32]byte - Precompute(&sharedKey, peersPublicKey, privateKey) - return secretbox.Seal(out, message, nonce, &sharedKey) -} - -// SealAfterPrecomputation performs the same actions as Seal, but takes a -// shared key as generated by Precompute. -func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte { - return secretbox.Seal(out, message, nonce, sharedKey) -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. 
-func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) { - var sharedKey [32]byte - Precompute(&sharedKey, peersPublicKey, privateKey) - return secretbox.Open(out, box, nonce, &sharedKey) -} - -// OpenAfterPrecomputation performs the same actions as Open, but takes a -// shared key as generated by Precompute. -func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) { - return secretbox.Open(out, box, nonce, sharedKey) -} - -// SealAnonymous appends an encrypted and authenticated copy of message to out, -// which will be AnonymousOverhead bytes longer than the original and must not -// overlap it. This differs from Seal in that the sender is not required to -// provide a private key. -func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) { - if rand == nil { - rand = cryptorand.Reader - } - ephemeralPub, ephemeralPriv, err := GenerateKey(rand) - if err != nil { - return nil, err - } - - var nonce [24]byte - if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil { - return nil, err - } - - if total := len(out) + AnonymousOverhead + len(message); cap(out) < total { - original := out - out = make([]byte, 0, total) - out = append(out, original...) - } - out = append(out, ephemeralPub[:]...) - - return Seal(out, message, &nonce, recipient, ephemeralPriv), nil -} - -// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and -// appends the message to out, which must not overlap box. The output will be -// AnonymousOverhead bytes smaller than box. 
-func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) { - if len(box) < AnonymousOverhead { - return nil, false - } - - var ephemeralPub [32]byte - copy(ephemeralPub[:], box[:32]) - - var nonce [24]byte - if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil { - return nil, false - } - - return Open(out, box[32:], &nonce, &ephemeralPub, privateKey) -} - -// sealNonce generates a 24 byte nonce that is a blake2b digest of the -// ephemeral public key and the receiver's public key. -func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error { - h, err := blake2b.New(24, nil) - if err != nil { - return err - } - - if _, err = h.Write(ephemeralPub[:]); err != nil { - return err - } - - if _, err = h.Write(peersPublicKey[:]); err != nil { - return err - } - - h.Sum(nonce[:0]) - - return nil -} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go deleted file mode 100644 index f3c3242a..00000000 --- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package secretbox encrypts and authenticates small messages. - -Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with -secret-key cryptography. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -Messages should be small because: - -1. The whole message needs to be held in memory to be processed. - -2. 
Using large messages pressures implementations on small machines to decrypt -and process plaintext before authenticating it. This is very dangerous, and -this API does not allow it, but a protocol that uses excessive message sizes -might present some implementations with no other choice. - -3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. - -4. Performance may be improved by working with messages that fit into data caches. - -Thus large amounts of data should be chunked so that each message is small. -(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable -chunk size. - -This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. -*/ -package secretbox // import "golang.org/x/crypto/nacl/secretbox" - -import ( - "golang.org/x/crypto/internal/alias" - "golang.org/x/crypto/internal/poly1305" - "golang.org/x/crypto/salsa20/salsa" -) - -// Overhead is the number of bytes of overhead when boxing a message. -const Overhead = poly1305.TagSize - -// setup produces a sub-key and Salsa20 counter given a nonce and key. -func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { - // We use XSalsa20 for encryption so first we need to generate a - // key and nonce with HSalsa20. - var hNonce [16]byte - copy(hNonce[:], nonce[:]) - salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) - - // The final 8 bytes of the original nonce form the new nonce. - copy(counter[:], nonce[16:]) -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. 
-func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// must not overlap message. The key and nonce pair must be unique for each -// distinct message and the output will be Overhead bytes longer than message. -func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - - ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) - if alias.AnyOverlap(out, message) { - panic("nacl: invalid buffer overlap") - } - - // We XOR up to 32 bytes of message with the keystream generated from - // the first block. - firstMessageBlock := message - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - - tagOut := out - out = out[poly1305.TagSize:] - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - message = message[len(firstMessageBlock):] - ciphertext := out - out = out[len(firstMessageBlock):] - - // Now encrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, message, &counter, &subKey) - - var tag [poly1305.TagSize]byte - poly1305.Sum(&tag, ciphertext, &poly1305Key) - copy(tagOut, tag[:]) - - return ret -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. 
-func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { - if len(box) < Overhead { - return nil, false - } - - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - var tag [poly1305.TagSize]byte - copy(tag[:], box) - - if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { - return nil, false - } - - ret, out := sliceForAppend(out, len(box)-Overhead) - if alias.AnyOverlap(out, box) { - panic("nacl: invalid buffer overlap") - } - - // We XOR up to 32 bytes of box with the keystream generated from - // the first block. - box = box[Overhead:] - firstMessageBlock := box - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - - box = box[len(firstMessageBlock):] - out = out[len(firstMessageBlock):] - - // Now decrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, box, &counter, &subKey) - - return ret, true -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go deleted file mode 100644 index 3fd05b27..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package salsa provides low-level access to functions in the Salsa family. -package salsa // import "golang.org/x/crypto/salsa20/salsa" - -import "math/bits" - -// Sigma is the Salsa20 constant for 256-bit keys. 
-var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} - -// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte -// key k, and 16-byte constant c, and puts the result into the 32-byte array -// out. -func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - for i := 0; i < 20; i += 2 { - u := x0 + x12 - x4 ^= bits.RotateLeft32(u, 7) - u = x4 + x0 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x4 - x12 ^= bits.RotateLeft32(u, 13) - u = x12 + x8 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x1 - x9 ^= bits.RotateLeft32(u, 7) - u = x9 + x5 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x9 - x1 ^= bits.RotateLeft32(u, 13) - u 
= x1 + x13 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x6 - x14 ^= bits.RotateLeft32(u, 7) - u = x14 + x10 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x14 - x6 ^= bits.RotateLeft32(u, 13) - u = x6 + x2 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x11 - x3 ^= bits.RotateLeft32(u, 7) - u = x3 + x15 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x3 - x11 ^= bits.RotateLeft32(u, 13) - u = x11 + x7 - x15 ^= bits.RotateLeft32(u, 18) - - u = x0 + x3 - x1 ^= bits.RotateLeft32(u, 7) - u = x1 + x0 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x1 - x3 ^= bits.RotateLeft32(u, 13) - u = x3 + x2 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x4 - x6 ^= bits.RotateLeft32(u, 7) - u = x6 + x5 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x6 - x4 ^= bits.RotateLeft32(u, 13) - u = x4 + x7 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x9 - x11 ^= bits.RotateLeft32(u, 7) - u = x11 + x10 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x11 - x9 ^= bits.RotateLeft32(u, 13) - u = x9 + x8 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x14 - x12 ^= bits.RotateLeft32(u, 7) - u = x12 + x15 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x12 - x14 ^= bits.RotateLeft32(u, 13) - u = x14 + x13 - x15 ^= bits.RotateLeft32(u, 18) - } - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x5) - out[5] = byte(x5 >> 8) - out[6] = byte(x5 >> 16) - out[7] = byte(x5 >> 24) - - out[8] = byte(x10) - out[9] = byte(x10 >> 8) - out[10] = byte(x10 >> 16) - out[11] = byte(x10 >> 24) - - out[12] = byte(x15) - out[13] = byte(x15 >> 8) - out[14] = byte(x15 >> 16) - out[15] = byte(x15 >> 24) - - out[16] = byte(x6) - out[17] = byte(x6 >> 8) - out[18] = byte(x6 >> 16) - out[19] = byte(x6 >> 24) - - out[20] = byte(x7) - out[21] = byte(x7 >> 8) - out[22] = byte(x7 >> 16) - out[23] = byte(x7 >> 24) - - out[24] = byte(x8) - out[25] = byte(x8 >> 8) - out[26] = byte(x8 >> 16) - out[27] = byte(x8 >> 24) - - out[28] = byte(x9) - out[29] = byte(x9 >> 8) - out[30] = byte(x9 >> 16) 
- out[31] = byte(x9 >> 24) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go deleted file mode 100644 index 7ec7bb39..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package salsa - -import "math/bits" - -// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts -// the result into the 64-byte array out. The input and output may be the same array. -func Core208(out *[64]byte, in *[64]byte) { - j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 - j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 - j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 - j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 - j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 - j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 - j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 - j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 - j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 - j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 - j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 - j15 := 
uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < 8; i += 2 { - u := x0 + x12 - x4 ^= bits.RotateLeft32(u, 7) - u = x4 + x0 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x4 - x12 ^= bits.RotateLeft32(u, 13) - u = x12 + x8 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x1 - x9 ^= bits.RotateLeft32(u, 7) - u = x9 + x5 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x9 - x1 ^= bits.RotateLeft32(u, 13) - u = x1 + x13 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x6 - x14 ^= bits.RotateLeft32(u, 7) - u = x14 + x10 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x14 - x6 ^= bits.RotateLeft32(u, 13) - u = x6 + x2 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x11 - x3 ^= bits.RotateLeft32(u, 7) - u = x3 + x15 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x3 - x11 ^= bits.RotateLeft32(u, 13) - u = x11 + x7 - x15 ^= bits.RotateLeft32(u, 18) - - u = x0 + x3 - x1 ^= bits.RotateLeft32(u, 7) - u = x1 + x0 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x1 - x3 ^= bits.RotateLeft32(u, 13) - u = x3 + x2 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x4 - x6 ^= bits.RotateLeft32(u, 7) - u = x6 + x5 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x6 - x4 ^= bits.RotateLeft32(u, 13) - u = x4 + x7 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x9 - x11 ^= bits.RotateLeft32(u, 7) - u = x11 + x10 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x11 - x9 ^= bits.RotateLeft32(u, 13) - u = x9 + x8 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x14 - x12 ^= bits.RotateLeft32(u, 7) - u = x12 + x15 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x12 - x14 ^= bits.RotateLeft32(u, 13) - u = x14 + x13 - x15 ^= bits.RotateLeft32(u, 18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] 
= byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] = byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go deleted file mode 100644 index c400dfcf..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc -// +build amd64,!purego,gc - -package salsa - -//go:noescape - -// salsa2020XORKeyStream is implemented in salsa20_amd64.s. -func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out must overlap entirely or not at all. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - if len(in) == 0 { - return - } - _ = out[len(in)-1] - salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s deleted file mode 100644 index c0892772..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s +++ /dev/null @@ -1,881 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc -// +build amd64,!purego,gc - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. 
-TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment - MOVQ out+0(FP),DI - MOVQ in+8(FP),SI - MOVQ n+16(FP),DX - MOVQ nonce+24(FP),CX - MOVQ key+32(FP),R8 - - MOVQ SP,R12 - ADDQ $31, R12 - ANDQ $~31, R12 - - MOVQ DX,R9 - MOVQ CX,DX - MOVQ R8,R10 - CMPQ R9,$0 - JBE DONE - START: - MOVL 20(R10),CX - MOVL 0(R10),R8 - MOVL 0(DX),AX - MOVL 16(R10),R11 - MOVL CX,0(R12) - MOVL R8, 4 (R12) - MOVL AX, 8 (R12) - MOVL R11, 12 (R12) - MOVL 8(DX),CX - MOVL 24(R10),R8 - MOVL 4(R10),AX - MOVL 4(DX),R11 - MOVL CX,16(R12) - MOVL R8, 20 (R12) - MOVL AX, 24 (R12) - MOVL R11, 28 (R12) - MOVL 12(DX),CX - MOVL 12(R10),DX - MOVL 28(R10),R8 - MOVL 8(R10),AX - MOVL DX,32(R12) - MOVL CX, 36 (R12) - MOVL R8, 40 (R12) - MOVL AX, 44 (R12) - MOVQ $1634760805,DX - MOVQ $857760878,CX - MOVQ $2036477234,R8 - MOVQ $1797285236,AX - MOVL DX,48(R12) - MOVL CX, 52 (R12) - MOVL R8, 56 (R12) - MOVL AX, 60 (R12) - CMPQ R9,$256 - JB BYTESBETWEEN1AND255 - MOVOA 48(R12),X0 - PSHUFL $0X55,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X3 - PSHUFL $0X00,X0,X0 - MOVOA X1,64(R12) - MOVOA X2,80(R12) - MOVOA X3,96(R12) - MOVOA X0,112(R12) - MOVOA 0(R12),X0 - PSHUFL $0XAA,X0,X1 - PSHUFL $0XFF,X0,X2 - PSHUFL $0X00,X0,X3 - PSHUFL $0X55,X0,X0 - MOVOA X1,128(R12) - MOVOA X2,144(R12) - MOVOA X3,160(R12) - MOVOA X0,176(R12) - MOVOA 16(R12),X0 - PSHUFL $0XFF,X0,X1 - PSHUFL $0X55,X0,X2 - PSHUFL $0XAA,X0,X0 - MOVOA X1,192(R12) - MOVOA X2,208(R12) - MOVOA X0,224(R12) - MOVOA 32(R12),X0 - PSHUFL $0X00,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X0 - MOVOA X1,240(R12) - MOVOA X2,256(R12) - MOVOA X0,272(R12) - BYTESATLEAST256: - MOVL 16(R12),DX - MOVL 36 (R12),CX - MOVL DX,288(R12) - MOVL CX,304(R12) - SHLQ $32,CX - ADDQ CX,DX - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 292 (R12) - MOVL CX, 308 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 296 (R12) - MOVL CX, 312 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 300 (R12) - MOVL CX, 316 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ 
$32,CX - MOVL DX,16(R12) - MOVL CX, 36 (R12) - MOVQ R9,352(R12) - MOVQ $20,DX - MOVOA 64(R12),X0 - MOVOA 80(R12),X1 - MOVOA 96(R12),X2 - MOVOA 256(R12),X3 - MOVOA 272(R12),X4 - MOVOA 128(R12),X5 - MOVOA 144(R12),X6 - MOVOA 176(R12),X7 - MOVOA 192(R12),X8 - MOVOA 208(R12),X9 - MOVOA 224(R12),X10 - MOVOA 304(R12),X11 - MOVOA 112(R12),X12 - MOVOA 160(R12),X13 - MOVOA 240(R12),X14 - MOVOA 288(R12),X15 - MAINLOOP1: - MOVOA X1,320(R12) - MOVOA X2,336(R12) - MOVOA X13,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X14 - PSRLL $25,X2 - PXOR X2,X14 - MOVOA X7,X1 - PADDL X0,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X11 - PSRLL $25,X2 - PXOR X2,X11 - MOVOA X12,X1 - PADDL X14,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X15 - PSRLL $23,X2 - PXOR X2,X15 - MOVOA X0,X1 - PADDL X11,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X9 - PSRLL $23,X2 - PXOR X2,X9 - MOVOA X14,X1 - PADDL X15,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X13 - PSRLL $19,X2 - PXOR X2,X13 - MOVOA X11,X1 - PADDL X9,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X7 - PSRLL $19,X2 - PXOR X2,X7 - MOVOA X15,X1 - PADDL X13,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA 320(R12),X1 - MOVOA X12,320(R12) - MOVOA X9,X2 - PADDL X7,X2 - MOVOA X2,X12 - PSLLL $18,X2 - PXOR X2,X0 - PSRLL $14,X12 - PXOR X12,X0 - MOVOA X5,X2 - PADDL X1,X2 - MOVOA X2,X12 - PSLLL $7,X2 - PXOR X2,X3 - PSRLL $25,X12 - PXOR X12,X3 - MOVOA 336(R12),X2 - MOVOA X0,336(R12) - MOVOA X6,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X4 - PSRLL $25,X12 - PXOR X12,X4 - MOVOA X1,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X10 - PSRLL $23,X12 - PXOR X12,X10 - MOVOA X2,X0 - PADDL X4,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X8 - PSRLL $23,X12 - PXOR X12,X8 - MOVOA X3,X0 - PADDL X10,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X5 - PSRLL $19,X12 - PXOR X12,X5 - MOVOA X4,X0 - PADDL X8,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X6 - PSRLL $19,X12 - PXOR X12,X6 - MOVOA X10,X0 - PADDL X5,X0 - MOVOA X0,X12 - PSLLL 
$18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA 320(R12),X0 - MOVOA X1,320(R12) - MOVOA X4,X1 - PADDL X0,X1 - MOVOA X1,X12 - PSLLL $7,X1 - PXOR X1,X7 - PSRLL $25,X12 - PXOR X12,X7 - MOVOA X8,X1 - PADDL X6,X1 - MOVOA X1,X12 - PSLLL $18,X1 - PXOR X1,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 336(R12),X12 - MOVOA X2,336(R12) - MOVOA X14,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X5 - PSRLL $25,X2 - PXOR X2,X5 - MOVOA X0,X1 - PADDL X7,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X10 - PSRLL $23,X2 - PXOR X2,X10 - MOVOA X12,X1 - PADDL X5,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X8 - PSRLL $23,X2 - PXOR X2,X8 - MOVOA X7,X1 - PADDL X10,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X4 - PSRLL $19,X2 - PXOR X2,X4 - MOVOA X5,X1 - PADDL X8,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X14 - PSRLL $19,X2 - PXOR X2,X14 - MOVOA X10,X1 - PADDL X4,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X0 - PSRLL $14,X2 - PXOR X2,X0 - MOVOA 320(R12),X1 - MOVOA X0,320(R12) - MOVOA X8,X0 - PADDL X14,X0 - MOVOA X0,X2 - PSLLL $18,X0 - PXOR X0,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA X11,X0 - PADDL X1,X0 - MOVOA X0,X2 - PSLLL $7,X0 - PXOR X0,X6 - PSRLL $25,X2 - PXOR X2,X6 - MOVOA 336(R12),X2 - MOVOA X12,336(R12) - MOVOA X3,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X13 - PSRLL $25,X12 - PXOR X12,X13 - MOVOA X1,X0 - PADDL X6,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X15 - PSRLL $23,X12 - PXOR X12,X15 - MOVOA X2,X0 - PADDL X13,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X9 - PSRLL $23,X12 - PXOR X12,X9 - MOVOA X6,X0 - PADDL X15,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X11 - PSRLL $19,X12 - PXOR X12,X11 - MOVOA X13,X0 - PADDL X9,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X3 - PSRLL $19,X12 - PXOR X12,X3 - MOVOA X15,X0 - PADDL X11,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA X9,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 320(R12),X12 - MOVOA 336(R12),X0 - SUBQ $2,DX - JA MAINLOOP1 - PADDL 
112(R12),X12 - PADDL 176(R12),X7 - PADDL 224(R12),X10 - PADDL 272(R12),X4 - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 0(SI),DX - XORL 4(SI),CX - XORL 8(SI),R8 - XORL 12(SI),R9 - MOVL DX,0(DI) - MOVL CX,4(DI) - MOVL R8,8(DI) - MOVL R9,12(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 64(SI),DX - XORL 68(SI),CX - XORL 72(SI),R8 - XORL 76(SI),R9 - MOVL DX,64(DI) - MOVL CX,68(DI) - MOVL R8,72(DI) - MOVL R9,76(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 128(SI),DX - XORL 132(SI),CX - XORL 136(SI),R8 - XORL 140(SI),R9 - MOVL DX,128(DI) - MOVL CX,132(DI) - MOVL R8,136(DI) - MOVL R9,140(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - XORL 192(SI),DX - XORL 196(SI),CX - XORL 200(SI),R8 - XORL 204(SI),R9 - MOVL DX,192(DI) - MOVL CX,196(DI) - MOVL R8,200(DI) - MOVL R9,204(DI) - PADDL 240(R12),X14 - PADDL 64(R12),X0 - PADDL 128(R12),X5 - PADDL 192(R12),X8 - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 16(SI),DX - XORL 20(SI),CX - XORL 24(SI),R8 - XORL 28(SI),R9 - MOVL DX,16(DI) - MOVL CX,20(DI) - MOVL R8,24(DI) - MOVL R9,28(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 80(SI),DX - XORL 84(SI),CX - XORL 88(SI),R8 - XORL 92(SI),R9 - MOVL DX,80(DI) - MOVL CX,84(DI) - MOVL R8,88(DI) - MOVL R9,92(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 144(SI),DX - XORL 148(SI),CX - XORL 152(SI),R8 - XORL 156(SI),R9 - MOVL DX,144(DI) - MOVL CX,148(DI) - MOVL 
R8,152(DI) - MOVL R9,156(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - XORL 208(SI),DX - XORL 212(SI),CX - XORL 216(SI),R8 - XORL 220(SI),R9 - MOVL DX,208(DI) - MOVL CX,212(DI) - MOVL R8,216(DI) - MOVL R9,220(DI) - PADDL 288(R12),X15 - PADDL 304(R12),X11 - PADDL 80(R12),X1 - PADDL 144(R12),X6 - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 32(SI),DX - XORL 36(SI),CX - XORL 40(SI),R8 - XORL 44(SI),R9 - MOVL DX,32(DI) - MOVL CX,36(DI) - MOVL R8,40(DI) - MOVL R9,44(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 96(SI),DX - XORL 100(SI),CX - XORL 104(SI),R8 - XORL 108(SI),R9 - MOVL DX,96(DI) - MOVL CX,100(DI) - MOVL R8,104(DI) - MOVL R9,108(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 160(SI),DX - XORL 164(SI),CX - XORL 168(SI),R8 - XORL 172(SI),R9 - MOVL DX,160(DI) - MOVL CX,164(DI) - MOVL R8,168(DI) - MOVL R9,172(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - XORL 224(SI),DX - XORL 228(SI),CX - XORL 232(SI),R8 - XORL 236(SI),R9 - MOVL DX,224(DI) - MOVL CX,228(DI) - MOVL R8,232(DI) - MOVL R9,236(DI) - PADDL 160(R12),X13 - PADDL 208(R12),X9 - PADDL 256(R12),X3 - PADDL 96(R12),X2 - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 48(SI),DX - XORL 52(SI),CX - XORL 56(SI),R8 - XORL 60(SI),R9 - MOVL DX,48(DI) - MOVL CX,52(DI) - MOVL R8,56(DI) - MOVL R9,60(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 112(SI),DX - XORL 116(SI),CX - XORL 120(SI),R8 - XORL 124(SI),R9 - MOVL DX,112(DI) - MOVL CX,116(DI) - MOVL R8,120(DI) - MOVL R9,124(DI) - MOVD 
X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 176(SI),DX - XORL 180(SI),CX - XORL 184(SI),R8 - XORL 188(SI),R9 - MOVL DX,176(DI) - MOVL CX,180(DI) - MOVL R8,184(DI) - MOVL R9,188(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - XORL 240(SI),DX - XORL 244(SI),CX - XORL 248(SI),R8 - XORL 252(SI),R9 - MOVL DX,240(DI) - MOVL CX,244(DI) - MOVL R8,248(DI) - MOVL R9,252(DI) - MOVQ 352(R12),R9 - SUBQ $256,R9 - ADDQ $256,SI - ADDQ $256,DI - CMPQ R9,$256 - JAE BYTESATLEAST256 - CMPQ R9,$0 - JBE DONE - BYTESBETWEEN1AND255: - CMPQ R9,$64 - JAE NOCOPY - MOVQ DI,DX - LEAQ 360(R12),DI - MOVQ R9,CX - REP; MOVSB - LEAQ 360(R12),DI - LEAQ 360(R12),SI - NOCOPY: - MOVQ R9,352(R12) - MOVOA 48(R12),X0 - MOVOA 0(R12),X1 - MOVOA 16(R12),X2 - MOVOA 32(R12),X3 - MOVOA X1,X4 - MOVQ $20,CX - MAINLOOP2: - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - 
PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - SUBQ $4,CX - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PXOR X7,X7 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - JA MAINLOOP2 - PADDL 48(R12),X0 - PADDL 0(R12),X1 - PADDL 16(R12),X2 - PADDL 32(R12),X3 - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 0(SI),CX - XORL 48(SI),R8 - XORL 32(SI),R9 - XORL 16(SI),AX - MOVL CX,0(DI) - MOVL R8,48(DI) - MOVL R9,32(DI) - MOVL AX,16(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 20(SI),CX - XORL 4(SI),R8 - XORL 52(SI),R9 - XORL 36(SI),AX - MOVL CX,20(DI) - MOVL R8,4(DI) - MOVL R9,52(DI) - MOVL AX,36(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 40(SI),CX - XORL 24(SI),R8 - XORL 8(SI),R9 - XORL 56(SI),AX - MOVL CX,40(DI) - MOVL R8,24(DI) - MOVL R9,8(DI) - MOVL AX,56(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - XORL 60(SI),CX - XORL 44(SI),R8 - XORL 28(SI),R9 - XORL 12(SI),AX - MOVL CX,60(DI) - MOVL R8,44(DI) - MOVL R9,28(DI) - MOVL AX,12(DI) - MOVQ 352(R12),R9 - MOVL 16(R12),CX - MOVL 36 (R12),R8 - ADDQ $1,CX - SHLQ $32,R8 - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $32,R8 - MOVL CX,16(R12) - MOVL R8, 36 (R12) - CMPQ R9,$64 - JA 
BYTESATLEAST65 - JAE BYTESATLEAST64 - MOVQ DI,SI - MOVQ DX,DI - MOVQ R9,CX - REP; MOVSB - BYTESATLEAST64: - DONE: - RET - BYTESATLEAST65: - SUBQ $64,R9 - ADDQ $64,DI - ADDQ $64,SI - JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go deleted file mode 100644 index 4392cc1a..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package salsa - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out must overlap entirely or not at all. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - genericXORKeyStream(out, in, counter, key) -} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go deleted file mode 100644 index e5cdb9a2..00000000 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package salsa - -import "math/bits" - -const rounds = 20 - -// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, -// and 16-byte constant c, and puts the result into 64-byte array out. 
-func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < rounds; i += 2 { - u := x0 + x12 - x4 ^= bits.RotateLeft32(u, 7) - u = x4 + x0 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x4 - x12 ^= bits.RotateLeft32(u, 13) - u = x12 + x8 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x1 - x9 ^= bits.RotateLeft32(u, 7) - u = x9 + x5 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x9 - x1 ^= bits.RotateLeft32(u, 13) - u = x1 + x13 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x6 - x14 ^= bits.RotateLeft32(u, 7) - u = x14 + x10 - x2 ^= 
bits.RotateLeft32(u, 9) - u = x2 + x14 - x6 ^= bits.RotateLeft32(u, 13) - u = x6 + x2 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x11 - x3 ^= bits.RotateLeft32(u, 7) - u = x3 + x15 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x3 - x11 ^= bits.RotateLeft32(u, 13) - u = x11 + x7 - x15 ^= bits.RotateLeft32(u, 18) - - u = x0 + x3 - x1 ^= bits.RotateLeft32(u, 7) - u = x1 + x0 - x2 ^= bits.RotateLeft32(u, 9) - u = x2 + x1 - x3 ^= bits.RotateLeft32(u, 13) - u = x3 + x2 - x0 ^= bits.RotateLeft32(u, 18) - - u = x5 + x4 - x6 ^= bits.RotateLeft32(u, 7) - u = x6 + x5 - x7 ^= bits.RotateLeft32(u, 9) - u = x7 + x6 - x4 ^= bits.RotateLeft32(u, 13) - u = x4 + x7 - x5 ^= bits.RotateLeft32(u, 18) - - u = x10 + x9 - x11 ^= bits.RotateLeft32(u, 7) - u = x11 + x10 - x8 ^= bits.RotateLeft32(u, 9) - u = x8 + x11 - x9 ^= bits.RotateLeft32(u, 13) - u = x9 + x8 - x10 ^= bits.RotateLeft32(u, 18) - - u = x15 + x14 - x12 ^= bits.RotateLeft32(u, 7) - u = x12 + x15 - x13 ^= bits.RotateLeft32(u, 9) - u = x13 + x12 - x14 ^= bits.RotateLeft32(u, 13) - u = x14 + x13 - x15 ^= bits.RotateLeft32(u, 18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] 
= byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} - -// genericXORKeyStream is the generic implementation of XORKeyStream to be used -// when no assembly implementation is available. -func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - var block [64]byte - var counterCopy [16]byte - copy(counterCopy[:], counter[:]) - - for len(in) >= 64 { - core(&block, &counterCopy, key, &Sigma) - for i, x := range block { - out[i] = in[i] ^ x - } - u := uint32(1) - for i := 8; i < 16; i++ { - u += uint32(counterCopy[i]) - counterCopy[i] = byte(u) - u >>= 8 - } - in = in[64:] - out = out[64:] - } - - if len(in) > 0 { - core(&block, &counterCopy, key, &Sigma) - for i, v := range in { - out[i] = v ^ block[i] - } - } -} diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go deleted file mode 100644 index 3d6f516a..00000000 --- a/vendor/golang.org/x/net/internal/socks/client.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" - "time" -) - -var ( - noDeadline = time.Time{} - aLongTimeAgo = time.Unix(1, 0) -) - -func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { - host, port, err := splitHostPort(address) - if err != nil { - return nil, err - } - if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { - c.SetDeadline(deadline) - defer c.SetDeadline(noDeadline) - } - if ctx != context.Background() { - errCh := make(chan error, 1) - done := make(chan struct{}) - defer func() { - close(done) - if ctxErr == nil { - ctxErr = <-errCh - } - }() - go func() { - select { - case <-ctx.Done(): - c.SetDeadline(aLongTimeAgo) - errCh <- ctx.Err() - case <-done: - errCh <- nil - } - }() - } - - b := make([]byte, 0, 6+len(host)) // the size here is just an estimate - b = append(b, Version5) - if len(d.AuthMethods) == 0 || d.Authenticate == nil { - b = append(b, 1, byte(AuthMethodNotRequired)) - } else { - ams := d.AuthMethods - if len(ams) > 255 { - return nil, errors.New("too many authentication methods") - } - b = append(b, byte(len(ams))) - for _, am := range ams { - b = append(b, byte(am)) - } - } - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - am := AuthMethod(b[1]) - if am == AuthMethodNoAcceptableMethods { - return nil, errors.New("no acceptable authentication methods") - } - if d.Authenticate != nil { - if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { - return - } - } - - b = b[:0] - b = append(b, Version5, byte(d.cmd), 0) - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - b = append(b, AddrTypeIPv4) - b = append(b, ip4...) 
- } else if ip6 := ip.To16(); ip6 != nil { - b = append(b, AddrTypeIPv6) - b = append(b, ip6...) - } else { - return nil, errors.New("unknown address type") - } - } else { - if len(host) > 255 { - return nil, errors.New("FQDN too long") - } - b = append(b, AddrTypeFQDN) - b = append(b, byte(len(host))) - b = append(b, host...) - } - b = append(b, byte(port>>8), byte(port)) - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { - return nil, errors.New("unknown error " + cmdErr.String()) - } - if b[2] != 0 { - return nil, errors.New("non-zero reserved field") - } - l := 2 - var a Addr - switch b[3] { - case AddrTypeIPv4: - l += net.IPv4len - a.IP = make(net.IP, net.IPv4len) - case AddrTypeIPv6: - l += net.IPv6len - a.IP = make(net.IP, net.IPv6len) - case AddrTypeFQDN: - if _, err := io.ReadFull(c, b[:1]); err != nil { - return nil, err - } - l += int(b[0]) - default: - return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) - } - if cap(b) < l { - b = make([]byte, l) - } else { - b = b[:l] - } - if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { - return - } - if a.IP != nil { - copy(a.IP, b) - } else { - a.Name = string(b[:len(b)-2]) - } - a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) - return &a, nil -} - -func splitHostPort(address string) (string, int, error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return "", 0, err - } - portnum, err := strconv.Atoi(port) - if err != nil { - return "", 0, err - } - if 1 > portnum || portnum > 0xffff { - return "", 0, errors.New("port number out of range " + port) - } - return host, portnum, nil -} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go deleted file mode 100644 index 
84fcc32b..00000000 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package socks provides a SOCKS version 5 client implementation. -// -// SOCKS protocol version 5 is defined in RFC 1928. -// Username/Password authentication for SOCKS version 5 is defined in -// RFC 1929. -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" -) - -// A Command represents a SOCKS command. -type Command int - -func (cmd Command) String() string { - switch cmd { - case CmdConnect: - return "socks connect" - case cmdBind: - return "socks bind" - default: - return "socks " + strconv.Itoa(int(cmd)) - } -} - -// An AuthMethod represents a SOCKS authentication method. -type AuthMethod int - -// A Reply represents a SOCKS command reply code. -type Reply int - -func (code Reply) String() string { - switch code { - case StatusSucceeded: - return "succeeded" - case 0x01: - return "general SOCKS server failure" - case 0x02: - return "connection not allowed by ruleset" - case 0x03: - return "network unreachable" - case 0x04: - return "host unreachable" - case 0x05: - return "connection refused" - case 0x06: - return "TTL expired" - case 0x07: - return "command not supported" - case 0x08: - return "address type not supported" - default: - return "unknown code: " + strconv.Itoa(int(code)) - } -} - -// Wire protocol constants. 
-const ( - Version5 = 0x05 - - AddrTypeIPv4 = 0x01 - AddrTypeFQDN = 0x03 - AddrTypeIPv6 = 0x04 - - CmdConnect Command = 0x01 // establishes an active-open forward proxy connection - cmdBind Command = 0x02 // establishes a passive-open forward proxy connection - - AuthMethodNotRequired AuthMethod = 0x00 // no authentication required - AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password - AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods - - StatusSucceeded Reply = 0x00 -) - -// An Addr represents a SOCKS-specific address. -// Either Name or IP is used exclusively. -type Addr struct { - Name string // fully-qualified domain name - IP net.IP - Port int -} - -func (a *Addr) Network() string { return "socks" } - -func (a *Addr) String() string { - if a == nil { - return "" - } - port := strconv.Itoa(a.Port) - if a.IP == nil { - return net.JoinHostPort(a.Name, port) - } - return net.JoinHostPort(a.IP.String(), port) -} - -// A Conn represents a forward proxy connection. -type Conn struct { - net.Conn - - boundAddr net.Addr -} - -// BoundAddr returns the address assigned by the proxy server for -// connecting to the command target address from the proxy server. -func (c *Conn) BoundAddr() net.Addr { - if c == nil { - return nil - } - return c.boundAddr -} - -// A Dialer holds SOCKS-specific options. -type Dialer struct { - cmd Command // either CmdConnect or cmdBind - proxyNetwork string // network between a proxy server and a client - proxyAddress string // proxy server address - - // ProxyDial specifies the optional dial function for - // establishing the transport connection. - ProxyDial func(context.Context, string, string) (net.Conn, error) - - // AuthMethods specifies the list of request authentication - // methods. - // If empty, SOCKS client requests only AuthMethodNotRequired. - AuthMethods []AuthMethod - - // Authenticate specifies the optional authentication - // function. 
It must be non-nil when AuthMethods is not empty. - // It must return an error when the authentication is failed. - Authenticate func(context.Context, io.ReadWriter, AuthMethod) error -} - -// DialContext connects to the provided address on the provided -// network. -// -// The returned error value may be a net.OpError. When the Op field of -// net.OpError contains "socks", the Source field contains a proxy -// server address and the Addr field contains a command target -// address. -// -// See func Dial of the net package of standard library for a -// description of the network and address parameters. -func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) - } else { - var dd net.Dialer - c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - a, err := d.connect(ctx, c, address) - if err != nil { - c.Close() - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return &Conn{Conn: c, boundAddr: a}, nil -} - -// DialWithConn initiates a connection from SOCKS server to the target -// network and address using the connection c that is already -// connected to the SOCKS server. -// -// It returns the connection's local address assigned by the SOCKS -// server. 
-func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - a, err := d.connect(ctx, c, address) - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return a, nil -} - -// Dial connects to the provided address on the provided network. -// -// Unlike DialContext, it returns a raw transport connection instead -// of a forward proxy connection. -// -// Deprecated: Use DialContext or DialWithConn instead. -func (d *Dialer) Dial(network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) - } else { - c, err = net.Dial(d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { - c.Close() - return nil, err - } - return c, nil -} - -func (d *Dialer) validateTarget(network, address string) error { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return errors.New("network not implemented") - } - switch d.cmd { - case CmdConnect, cmdBind: - default: - return errors.New("command not 
implemented") - } - return nil -} - -func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { - for i, s := range []string{d.proxyAddress, address} { - host, port, err := splitHostPort(s) - if err != nil { - return nil, nil, err - } - a := &Addr{Port: port} - a.IP = net.ParseIP(host) - if a.IP == nil { - a.Name = host - } - if i == 0 { - proxy = a - } else { - dst = a - } - } - return -} - -// NewDialer returns a new Dialer that dials through the provided -// proxy server's network and address. -func NewDialer(network, address string) *Dialer { - return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} -} - -const ( - authUsernamePasswordVersion = 0x01 - authStatusSucceeded = 0x00 -) - -// UsernamePassword are the credentials for the username/password -// authentication method. -type UsernamePassword struct { - Username string - Password string -} - -// Authenticate authenticates a pair of username and password with the -// proxy server. -func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { - switch auth { - case AuthMethodNotRequired: - return nil - case AuthMethodUsernamePassword: - if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 { - return errors.New("invalid username/password") - } - b := []byte{authUsernamePasswordVersion} - b = append(b, byte(len(up.Username))) - b = append(b, up.Username...) - b = append(b, byte(len(up.Password))) - b = append(b, up.Password...) 
- // TODO(mikio): handle IO deadlines and cancelation if - // necessary - if _, err := rw.Write(b); err != nil { - return err - } - if _, err := io.ReadFull(rw, b[:2]); err != nil { - return err - } - if b[0] != authUsernamePasswordVersion { - return errors.New("invalid username/password version") - } - if b[1] != authStatusSucceeded { - return errors.New("username/password authentication failed") - } - return nil - } - return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) -} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go deleted file mode 100644 index 811c2e4e..00000000 --- a/vendor/golang.org/x/net/proxy/dial.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" -) - -// A ContextDialer dials using a context. -type ContextDialer interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. -// -// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. -// -// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer -// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. -// -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. 
-func Dial(ctx context.Context, network, address string) (net.Conn, error) { - d := FromEnvironment() - if xd, ok := d.(ContextDialer); ok { - return xd.DialContext(ctx, network, address) - } - return dialContext(ctx, d, network, address) -} - -// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { - var ( - conn net.Conn - done = make(chan struct{}, 1) - err error - ) - go func() { - conn, err = d.Dial(network, address) - close(done) - if conn != nil && ctx.Err() != nil { - conn.Close() - } - }() - select { - case <-ctx.Done(): - err = ctx.Err() - case <-done: - } - return conn, err -} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go deleted file mode 100644 index 3d66bdef..00000000 --- a/vendor/golang.org/x/net/proxy/direct.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" -) - -type direct struct{} - -// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. -var Direct = direct{} - -var ( - _ Dialer = Direct - _ ContextDialer = Direct -) - -// Dial directly invokes net.Dial with the supplied parameters. -func (direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. 
-func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, network, addr) -} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go deleted file mode 100644 index 573fe79e..00000000 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - "strings" -) - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type PerHost struct { - def, bypass Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func NewPerHost(defaultDialer, bypass Dialer) *PerHost { - return &PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -// DialContext connects to the address addr on the given network through either -// defaultDialer or bypass. 
-func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - d := p.dialerForRequest(host) - if x, ok := d.(ContextDialer); ok { - return x.DialContext(ctx, network, addr) - } - return dialContext(ctx, d, network, addr) -} - -func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. 
A connection -// to a named host will never match an IP. -func (p *PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go deleted file mode 100644 index 9ff4b9a7..00000000 --- a/vendor/golang.org/x/net/proxy/proxy.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proxy provides support for a variety of protocols to proxy network -// data. -package proxy // import "golang.org/x/net/proxy" - -import ( - "errors" - "net" - "net/url" - "os" - "sync" -) - -// A Dialer is a means to establish a connection. -// Custom dialers should also implement ContextDialer. -type Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. 
-type Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy-related -// variables in the environment and makes underlying connections -// directly. -func FromEnvironment() Dialer { - return FromEnvironmentUsing(Direct) -} - -// FromEnvironmentUsing returns the dialer specify by the proxy-related -// variables in the environment and makes underlying connections -// using the provided forwarding Dialer (for instance, a *net.Dialer -// with desired configuration). -func FromEnvironmentUsing(forward Dialer) Dialer { - allProxy := allProxyEnv.Get() - if len(allProxy) == 0 { - return forward - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return forward - } - proxy, err := FromURL(proxyURL, forward) - if err != nil { - return forward - } - - noProxy := noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := NewPerHost(proxy, forward) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { - if proxySchemes == nil { - proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) - } - proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. 
-func FromURL(u *url.URL, forward Dialer) (Dialer, error) { - var auth *Auth - if u.User != nil { - auth = new(Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5", "socks5h": - addr := u.Hostname() - port := u.Port() - if port == "" { - port = "1080" - } - return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxySchemes != nil { - if f, ok := proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - allProxyEnv = &envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - noProxyEnv = &envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type envOnce struct { - names []string - once sync.Once - val string -} - -func (e *envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// reset is used by tests -func (e *envOnce) reset() { - e.once = sync.Once{} - e.val = "" -} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go deleted file mode 100644 index c91651f9..00000000 --- a/vendor/golang.org/x/net/proxy/socks5.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proxy - -import ( - "context" - "net" - - "golang.org/x/net/internal/socks" -) - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given -// address with an optional username and password. -// See RFC 1928 and RFC 1929. -func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { - d := socks.NewDialer(network, address) - if forward != nil { - if f, ok := forward.(ContextDialer); ok { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return f.DialContext(ctx, network, address) - } - } else { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return dialContext(ctx, forward, network, address) - } - } - } - if auth != nil { - up := socks.UsernamePassword{ - Username: auth.User, - Password: auth.Password, - } - d.AuthMethods = []socks.AuthMethod{ - socks.AuthMethodNotRequired, - socks.AuthMethodUsernamePassword, - } - d.Authenticate = up.Authenticate - } - return d, nil -} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go deleted file mode 100644 index 30f632c5..00000000 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semaphore provides a weighted semaphore implementation. -package semaphore // import "golang.org/x/sync/semaphore" - -import ( - "container/list" - "context" - "sync" -) - -type waiter struct { - n int64 - ready chan<- struct{} // Closed when semaphore acquired. -} - -// NewWeighted creates a new weighted semaphore with the given -// maximum combined weight for concurrent access. -func NewWeighted(n int64) *Weighted { - w := &Weighted{size: n} - return w -} - -// Weighted provides a way to bound concurrent access to a resource. 
-// The callers can request access with a given weight. -type Weighted struct { - size int64 - cur int64 - mu sync.Mutex - waiters list.List -} - -// Acquire acquires the semaphore with a weight of n, blocking until resources -// are available or ctx is done. On success, returns nil. On failure, returns -// ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. -func (s *Weighted) Acquire(ctx context.Context, n int64) error { - s.mu.Lock() - if s.size-s.cur >= n && s.waiters.Len() == 0 { - s.cur += n - s.mu.Unlock() - return nil - } - - if n > s.size { - // Don't make other Acquire calls block on one that's doomed to fail. - s.mu.Unlock() - <-ctx.Done() - return ctx.Err() - } - - ready := make(chan struct{}) - w := waiter{n: n, ready: ready} - elem := s.waiters.PushBack(w) - s.mu.Unlock() - - select { - case <-ctx.Done(): - err := ctx.Err() - s.mu.Lock() - select { - case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil - default: - isFront := s.waiters.Front() == elem - s.waiters.Remove(elem) - // If we're at the front and there're extra tokens left, notify other waiters. - if isFront && s.size > s.cur { - s.notifyWaiters() - } - } - s.mu.Unlock() - return err - - case <-ready: - return nil - } -} - -// TryAcquire acquires the semaphore with a weight of n without blocking. -// On success, returns true. On failure, returns false and leaves the semaphore unchanged. -func (s *Weighted) TryAcquire(n int64) bool { - s.mu.Lock() - success := s.size-s.cur >= n && s.waiters.Len() == 0 - if success { - s.cur += n - } - s.mu.Unlock() - return success -} - -// Release releases the semaphore with a weight of n. 
-func (s *Weighted) Release(n int64) { - s.mu.Lock() - s.cur -= n - if s.cur < 0 { - s.mu.Unlock() - panic("semaphore: released more than held") - } - s.notifyWaiters() - s.mu.Unlock() -} - -func (s *Weighted) notifyWaiters() { - for { - next := s.waiters.Front() - if next == nil { - break // No more waiters blocked. - } - - w := next.Value.(waiter) - if s.size-s.cur < w.n { - // Not enough tokens for the next waiter. We could keep going (to try to - // find a waiter with a smaller request), but under load that could cause - // starvation for large requests; instead, we leave all remaining waiters - // blocked. - // - // Consider a semaphore used as a read-write lock, with N tokens, N - // readers, and one writer. Each reader can Acquire(1) to obtain a read - // lock. The writer can Acquire(N) to obtain a write lock, excluding all - // of the readers. If we allow the readers to jump ahead in the queue, - // the writer will starve — there is always one token available for every - // reader. - break - } - - s.cur += w.n - s.waiters.Remove(next) - close(w.ready) - } -} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s deleted file mode 100644 index db9171c2..00000000 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc -// +build gc - -#include "textflag.h" - -// -// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go -// - -TEXT ·syscall6(SB),NOSPLIT,$0-88 - JMP syscall·syscall6(SB) - -TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 - JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go deleted file mode 100644 index 271055be..00000000 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "runtime" -) - -// byteOrder is a subset of encoding/binary.ByteOrder. -type byteOrder interface { - Uint32([]byte) uint32 - Uint64([]byte) uint64 -} - -type littleEndian struct{} -type bigEndian struct{} - -func (littleEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (littleEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func (bigEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (bigEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} - -// hostByteOrder returns littleEndian on little-endian machines and -// bigEndian on big-endian machines. 
-func hostByteOrder() byteOrder { - switch runtime.GOARCH { - case "386", "amd64", "amd64p32", - "alpha", - "arm", "arm64", - "loong64", - "mipsle", "mips64le", "mips64p32le", - "nios2", - "ppc64le", - "riscv", "riscv64", - "sh": - return littleEndian{} - case "armbe", "arm64be", - "m68k", - "mips", "mips64", "mips64p32", - "ppc", "ppc64", - "s390", "s390x", - "shbe", - "sparc", "sparc64": - return bigEndian{} - } - panic("unknown architecture") -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go deleted file mode 100644 index 4756ad5f..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpu implements processor feature detection for -// various CPU architectures. -package cpu - -import ( - "os" - "strings" -) - -// Initialized reports whether the CPU features were initialized. -// -// For some GOOS/GOARCH combinations initialization of the CPU features depends -// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm -// Initialized will report false if reading the file fails. -var Initialized bool - -// CacheLinePad is used to pad structs to avoid false sharing. -type CacheLinePad struct{ _ [cacheLineSize]byte } - -// X86 contains the supported CPU features of the -// current X86/AMD64 platform. If the current platform -// is not X86/AMD64 then all feature flags are false. -// -// X86 is padded to avoid false sharing. Further the HasAVX -// and HasAVX2 are only set if the OS supports XMM and YMM -// registers in addition to the CPUID feature bit being set. 
-var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasAVX512 bool // Advanced vector extension 512 - HasAVX512F bool // Advanced vector extension 512 Foundation Instructions - HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions - HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions - HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions - HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions - HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions - HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add - HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions - HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision - HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision - HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions - HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations - HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions - HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions - HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions - HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 - HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms - HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions - HasAMXTile bool // Advanced Matrix 
Extension Tile instructions - HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions - HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasCX16 bool // Compare and exchange 16 Bytes - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. - HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - _ CacheLinePad -} - -// ARM64 contains the supported CPU features of the -// current ARMv8(aarch64) platform. If the current platform -// is not arm64 then all feature flags are false. 
-var ARM64 struct { - _ CacheLinePad - HasFP bool // Floating-point instruction set (always available) - HasASIMD bool // Advanced SIMD (always available) - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - HasATOMICS bool // Atomic memory operation instruction set - HasFPHP bool // Half precision floating-point instruction set - HasASIMDHP bool // Advanced SIMD half precision instruction set - HasCPUID bool // CPUID identification scheme registers - HasASIMDRDM bool // Rounding double multiply add/subtract instruction set - HasJSCVT bool // Javascript conversion from floating-point to integer - HasFCMA bool // Floating-point multiplication and addition of complex numbers - HasLRCPC bool // Release Consistent processor consistent support - HasDCPOP bool // Persistent memory support - HasSHA3 bool // SHA3 hardware implementation - HasSM3 bool // SM3 hardware implementation - HasSM4 bool // SM4 hardware implementation - HasASIMDDP bool // Advanced SIMD double precision instruction set - HasSHA512 bool // SHA512 hardware implementation - HasSVE bool // Scalable Vector Extensions - HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 - _ CacheLinePad -} - -// ARM contains the supported CPU features of the current ARM (32-bit) platform. -// All feature flags are false if: -// 1. the current platform is not arm, or -// 2. the current operating system is not Linux. 
-var ARM struct { - _ CacheLinePad - HasSWP bool // SWP instruction support - HasHALF bool // Half-word load and store support - HasTHUMB bool // ARM Thumb instruction set - Has26BIT bool // Address space limited to 26-bits - HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support - HasFPA bool // Floating point arithmetic support - HasVFP bool // Vector floating point support - HasEDSP bool // DSP Extensions support - HasJAVA bool // Java instruction set - HasIWMMXT bool // Intel Wireless MMX technology support - HasCRUNCH bool // MaverickCrunch context switching and handling - HasTHUMBEE bool // Thumb EE instruction set - HasNEON bool // NEON instruction set - HasVFPv3 bool // Vector floating point version 3 support - HasVFPv3D16 bool // Vector floating point version 3 D8-D15 - HasTLS bool // Thread local storage support - HasVFPv4 bool // Vector floating point version 4 support - HasIDIVA bool // Integer divide instruction support in ARM mode - HasIDIVT bool // Integer divide instruction support in Thumb mode - HasVFPD32 bool // Vector floating point version 3 D15-D31 - HasLPAE bool // Large Physical Address Extensions - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - _ CacheLinePad -} - -// MIPS64X contains the supported CPU features of the current mips64/mips64le -// platforms. If the current platform is not mips64/mips64le or the current -// operating system is not Linux then all feature flags are false. -var MIPS64X struct { - _ CacheLinePad - HasMSA bool // MIPS SIMD architecture - _ CacheLinePad -} - -// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. -// If the current platform is not ppc64/ppc64le then all feature flags are false. 
-// -// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The struct is padded to avoid false sharing. -var PPC64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 - _ CacheLinePad -} - -// S390X contains the supported CPU features of the current IBM Z -// (s390x) platform. If the current platform is not IBM Z then all -// feature flags are false. -// -// S390X is padded to avoid false sharing. Further HasVX is only set -// if the OS supports vector registers in addition to the STFLE -// feature bit being set. -var S390X struct { - _ CacheLinePad - HasZARCH bool // z/Architecture mode is active [mandatory] - HasSTFLE bool // store facility list extended - HasLDISP bool // long (20-bit) displacements - HasEIMM bool // 32-bit immediates - HasDFP bool // decimal floating point - HasETF3EH bool // ETF-3 enhanced - HasMSA bool // message security assist (CPACF) - HasAES bool // KM-AES{128,192,256} functions - HasAESCBC bool // KMC-AES{128,192,256} functions - HasAESCTR bool // KMCTR-AES{128,192,256} functions - HasAESGCM bool // KMA-GCM-AES{128,192,256} functions - HasGHASH bool // KIMD-GHASH function - HasSHA1 bool // K{I,L}MD-SHA-1 functions - HasSHA256 bool // K{I,L}MD-SHA-256 functions - HasSHA512 bool // K{I,L}MD-SHA-512 functions - HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions - HasVX bool // vector facility - HasVXE bool // vector-enhancements facility 1 - _ CacheLinePad -} - -func init() { - archInit() - initOptions() - processOptions() -} - -// options contains the cpu debug options that can be used in 
GODEBUG. -// Options are arch dependent and are added by the arch specific initOptions functions. -// Features that are mandatory for the specific GOARCH should have the Required field set -// (e.g. SSE2 on amd64). -var options []option - -// Option names should be lower case. e.g. avx instead of AVX. -type option struct { - Name string - Feature *bool - Specified bool // whether feature value was specified in GODEBUG - Enable bool // whether feature should be enabled - Required bool // whether feature is mandatory and can not be disabled -} - -func processOptions() { - env := os.Getenv("GODEBUG") -field: - for env != "" { - field := "" - i := strings.IndexByte(env, ',') - if i < 0 { - field, env = env, "" - } else { - field, env = env[:i], env[i+1:] - } - if len(field) < 4 || field[:4] != "cpu." { - continue - } - i = strings.IndexByte(field, '=') - if i < 0 { - print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") - continue - } - key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" - - var enable bool - switch value { - case "on": - enable = true - case "off": - enable = false - default: - print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") - continue field - } - - if key == "all" { - for i := range options { - options[i].Specified = true - options[i].Enable = enable || options[i].Required - } - continue field - } - - for i := range options { - if options[i].Name == key { - options[i].Specified = true - options[i].Enable = enable - continue field - } - } - - print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") - } - - for _, o := range options { - if !o.Specified { - continue - } - - if o.Enable && !*o.Feature { - print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") - continue - } - - if !o.Enable && o.Required { - print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") - continue - } - - *o.Feature = o.Enable - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go deleted file mode 100644 index 8aaeef54..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build aix -// +build aix - -package cpu - -const ( - // getsystemcfg constants - _SC_IMPL = 2 - _IMPL_POWER8 = 0x10000 - _IMPL_POWER9 = 0x20000 -) - -func archInit() { - impl := getsystemcfg(_SC_IMPL) - if impl&_IMPL_POWER8 != 0 { - PPC64.IsPOWER8 = true - } - if impl&_IMPL_POWER9 != 0 { - PPC64.IsPOWER8 = true - PPC64.IsPOWER9 = true - } - - Initialized = true -} - -func getsystemcfg(label int) (n uint64) { - r0, _ := callgetsystemcfg(label) - n = uint64(r0) - return -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go deleted file mode 100644 index 301b752e..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 32 - -// HWCAP/HWCAP2 bits. -// These are specific to Linux. -const ( - hwcap_SWP = 1 << 0 - hwcap_HALF = 1 << 1 - hwcap_THUMB = 1 << 2 - hwcap_26BIT = 1 << 3 - hwcap_FAST_MULT = 1 << 4 - hwcap_FPA = 1 << 5 - hwcap_VFP = 1 << 6 - hwcap_EDSP = 1 << 7 - hwcap_JAVA = 1 << 8 - hwcap_IWMMXT = 1 << 9 - hwcap_CRUNCH = 1 << 10 - hwcap_THUMBEE = 1 << 11 - hwcap_NEON = 1 << 12 - hwcap_VFPv3 = 1 << 13 - hwcap_VFPv3D16 = 1 << 14 - hwcap_TLS = 1 << 15 - hwcap_VFPv4 = 1 << 16 - hwcap_IDIVA = 1 << 17 - hwcap_IDIVT = 1 << 18 - hwcap_VFPD32 = 1 << 19 - hwcap_LPAE = 1 << 20 - hwcap_EVTSTRM = 1 << 21 - - hwcap2_AES = 1 << 0 - hwcap2_PMULL = 1 << 1 - hwcap2_SHA1 = 1 << 2 - hwcap2_SHA2 = 1 << 3 - hwcap2_CRC32 = 1 << 4 -) - -func initOptions() { - options = []option{ - {Name: "pmull", Feature: &ARM.HasPMULL}, - {Name: "sha1", Feature: &ARM.HasSHA1}, - {Name: "sha2", Feature: &ARM.HasSHA2}, - {Name: "swp", Feature: &ARM.HasSWP}, - {Name: "thumb", Feature: &ARM.HasTHUMB}, - {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, - {Name: "tls", Feature: &ARM.HasTLS}, - {Name: "vfp", Feature: &ARM.HasVFP}, - 
{Name: "vfpd32", Feature: &ARM.HasVFPD32}, - {Name: "vfpv3", Feature: &ARM.HasVFPv3}, - {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, - {Name: "vfpv4", Feature: &ARM.HasVFPv4}, - {Name: "half", Feature: &ARM.HasHALF}, - {Name: "26bit", Feature: &ARM.Has26BIT}, - {Name: "fastmul", Feature: &ARM.HasFASTMUL}, - {Name: "fpa", Feature: &ARM.HasFPA}, - {Name: "edsp", Feature: &ARM.HasEDSP}, - {Name: "java", Feature: &ARM.HasJAVA}, - {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, - {Name: "crunch", Feature: &ARM.HasCRUNCH}, - {Name: "neon", Feature: &ARM.HasNEON}, - {Name: "idivt", Feature: &ARM.HasIDIVT}, - {Name: "idiva", Feature: &ARM.HasIDIVA}, - {Name: "lpae", Feature: &ARM.HasLPAE}, - {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, - {Name: "aes", Feature: &ARM.HasAES}, - {Name: "crc32", Feature: &ARM.HasCRC32}, - } - -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go deleted file mode 100644 index f3eb993b..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import "runtime" - -// cacheLineSize is used to prevent false sharing of cache lines. -// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. -// It doesn't cost much and is much more future-proof. 
-const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "fp", Feature: &ARM64.HasFP}, - {Name: "asimd", Feature: &ARM64.HasASIMD}, - {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, - {Name: "aes", Feature: &ARM64.HasAES}, - {Name: "fphp", Feature: &ARM64.HasFPHP}, - {Name: "jscvt", Feature: &ARM64.HasJSCVT}, - {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, - {Name: "pmull", Feature: &ARM64.HasPMULL}, - {Name: "sha1", Feature: &ARM64.HasSHA1}, - {Name: "sha2", Feature: &ARM64.HasSHA2}, - {Name: "sha3", Feature: &ARM64.HasSHA3}, - {Name: "sha512", Feature: &ARM64.HasSHA512}, - {Name: "sm3", Feature: &ARM64.HasSM3}, - {Name: "sm4", Feature: &ARM64.HasSM4}, - {Name: "sve", Feature: &ARM64.HasSVE}, - {Name: "crc32", Feature: &ARM64.HasCRC32}, - {Name: "atomics", Feature: &ARM64.HasATOMICS}, - {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, - {Name: "cpuid", Feature: &ARM64.HasCPUID}, - {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, - {Name: "fcma", Feature: &ARM64.HasFCMA}, - {Name: "dcpop", Feature: &ARM64.HasDCPOP}, - {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, - {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, - } -} - -func archInit() { - switch runtime.GOOS { - case "freebsd": - readARM64Registers() - case "linux", "netbsd", "openbsd": - doinit() - default: - // Many platforms don't seem to allow reading these registers. - setMinimalFeatures() - } -} - -// setMinimalFeatures fakes the minimal ARM64 features expected by -// TestARM64minimalFeatures. 
-func setMinimalFeatures() { - ARM64.HasASIMD = true - ARM64.HasFP = true -} - -func readARM64Registers() { - Initialized = true - - parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) -} - -func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { - // ID_AA64ISAR0_EL1 - switch extractBits(isar0, 4, 7) { - case 1: - ARM64.HasAES = true - case 2: - ARM64.HasAES = true - ARM64.HasPMULL = true - } - - switch extractBits(isar0, 8, 11) { - case 1: - ARM64.HasSHA1 = true - } - - switch extractBits(isar0, 12, 15) { - case 1: - ARM64.HasSHA2 = true - case 2: - ARM64.HasSHA2 = true - ARM64.HasSHA512 = true - } - - switch extractBits(isar0, 16, 19) { - case 1: - ARM64.HasCRC32 = true - } - - switch extractBits(isar0, 20, 23) { - case 2: - ARM64.HasATOMICS = true - } - - switch extractBits(isar0, 28, 31) { - case 1: - ARM64.HasASIMDRDM = true - } - - switch extractBits(isar0, 32, 35) { - case 1: - ARM64.HasSHA3 = true - } - - switch extractBits(isar0, 36, 39) { - case 1: - ARM64.HasSM3 = true - } - - switch extractBits(isar0, 40, 43) { - case 1: - ARM64.HasSM4 = true - } - - switch extractBits(isar0, 44, 47) { - case 1: - ARM64.HasASIMDDP = true - } - - // ID_AA64ISAR1_EL1 - switch extractBits(isar1, 0, 3) { - case 1: - ARM64.HasDCPOP = true - } - - switch extractBits(isar1, 12, 15) { - case 1: - ARM64.HasJSCVT = true - } - - switch extractBits(isar1, 16, 19) { - case 1: - ARM64.HasFCMA = true - } - - switch extractBits(isar1, 20, 23) { - case 1: - ARM64.HasLRCPC = true - } - - // ID_AA64PFR0_EL1 - switch extractBits(pfr0, 16, 19) { - case 0: - ARM64.HasFP = true - case 1: - ARM64.HasFP = true - ARM64.HasFPHP = true - } - - switch extractBits(pfr0, 20, 23) { - case 0: - ARM64.HasASIMD = true - case 1: - ARM64.HasASIMD = true - ARM64.HasASIMDHP = true - } - - switch extractBits(pfr0, 32, 35) { - case 1: - ARM64.HasSVE = true - } -} - -func extractBits(data uint64, start, end uint) uint { - return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) -} diff --git 
a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s deleted file mode 100644 index c61f95a0..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -#include "textflag.h" - -// func getisar0() uint64 -TEXT ·getisar0(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 - MOVD R0, ret+0(FP) - RET - -// func getisar1() uint64 -TEXT ·getisar1(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 - MOVD R0, ret+0(FP) - RET - -// func getpfr0() uint64 -TEXT ·getpfr0(SB),NOSPLIT,$0-8 - // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 - MOVD R0, ret+0(FP) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go deleted file mode 100644 index ccf542a7..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -package cpu - -func getisar0() uint64 -func getisar1() uint64 -func getpfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index 0af2f248..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc -// +build gc - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index fa7cdb9b..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc - -package cpu - -// cpuid is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) - -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go deleted file mode 100644 index 2aff3189..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gccgo -// +build gccgo - -package cpu - -func getisar0() uint64 { return 0 } -func getisar1() uint64 { return 0 } -func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go deleted file mode 100644 index 4bfbda61..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gccgo -// +build gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return false } - -// TODO(mundaym): the following feature detection functions are currently -// stubs. See https://golang.org/cl/162887 for how to fix this. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList { panic("not implemented for gccgo") } -func kmQuery() queryResult { panic("not implemented for gccgo") } -func kmcQuery() queryResult { panic("not implemented for gccgo") } -func kmctrQuery() queryResult { panic("not implemented for gccgo") } -func kmaQuery() queryResult { panic("not implemented for gccgo") } -func kimdQuery() queryResult { panic("not implemented for gccgo") } -func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c deleted file mode 100644 index 6cc73109..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo - -#include -#include -#include - -// Need to wrap __get_cpuid_count because it's declared as static. -int -gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); -} - -#pragma GCC diagnostic ignored "-Wunknown-pragmas" -#pragma GCC push_options -#pragma GCC target("xsave") -#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) - -// xgetbv reads the contents of an XCR (Extended Control Register) -// specified in the ECX register into registers EDX:EAX. -// Currently, the only supported value for XCR is 0. -void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - uint64_t v = _xgetbv(0); - *eax = v & 0xffffffff; - *edx = v >> 32; -} - -#pragma clang attribute pop -#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go deleted file mode 100644 index 863d415a..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 159a686f..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !386 && !amd64 && !amd64p32 && !arm64 -// +build !386,!amd64,!amd64p32,!arm64 - -package cpu - -func archInit() { - if err := readHWCAP(); err != nil { - return - } - doinit() - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go deleted file mode 100644 index 2057006d..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -func doinit() { - ARM.HasSWP = isSet(hwCap, hwcap_SWP) - ARM.HasHALF = isSet(hwCap, hwcap_HALF) - ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) - ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) - ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) - ARM.HasFPA = isSet(hwCap, hwcap_FPA) - ARM.HasVFP = isSet(hwCap, hwcap_VFP) - ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) - ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) - ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) - ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) - ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) - ARM.HasNEON = isSet(hwCap, hwcap_NEON) - ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) - ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) - ARM.HasTLS = isSet(hwCap, hwcap_TLS) - ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) - ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) - ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) - ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) - ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) - ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM.HasAES = isSet(hwCap2, hwcap2_AES) - ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) - ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) - ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) - ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index a968b80f..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "strings" - "syscall" -) - -// HWCAP/HWCAP2 bits. These are exposed by Linux. 
-const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 -) - -// linuxKernelCanEmulateCPUID reports whether we're running -// on Linux 4.11+. Ideally we'd like to ask the question about -// whether the current kernel contains -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 -// but the version number will have to do. -func linuxKernelCanEmulateCPUID() bool { - var un syscall.Utsname - syscall.Uname(&un) - var sb strings.Builder - for _, b := range un.Release[:] { - if b == 0 { - break - } - sb.WriteByte(byte(b)) - } - major, minor, _, ok := parseRelease(sb.String()) - return ok && (major > 4 || major == 4 && minor >= 11) -} - -func doinit() { - if err := readHWCAP(); err != nil { - // We failed to read /proc/self/auxv. This can happen if the binary has - // been given extra capabilities(7) with /bin/setcap. - // - // When this happens, we have two options. If the Linux kernel is new - // enough (4.11+), we can read the arm64 registers directly which'll - // trap into the kernel and then return back to userspace. - // - // But on older kernels, such as Linux 4.4.180 as used on many Synology - // devices, calling readARM64Registers (specifically getisar0) will - // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo - // instead. - // - // See golang/go#57336. 
- if linuxKernelCanEmulateCPUID() { - readARM64Registers() - } else { - readLinuxProcCPUInfo() - } - return - } - - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go deleted file mode 100644 index 6000db4c..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le - -package cpu - -// HWCAP bits. These are exposed by the Linux kernel 5.4. 
-const ( - // CPU features - hwcap_MIPS_MSA = 1 << 1 -) - -func doinit() { - // HWCAP feature bits - MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go deleted file mode 100644 index f4992b1a..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x -// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x - -package cpu - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 021356d6..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le - -package cpu - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. 
-const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index 1517ac61..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -func initS390Xbase() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go deleted file mode 100644 index 0f57b05b..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build loong64 -// +build loong64 - -package cpu - -const cacheLineSize = 64 - -func initOptions() { -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index f4063c66..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips64 || mips64le -// +build mips64 mips64le - -package cpu - -const cacheLineSize = 32 - -func initOptions() { - options = []option{ - {Name: "msa", Feature: &MIPS64X.HasMSA}, - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index 07c4e36d..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips || mipsle -// +build mips mipsle - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go deleted file mode 100644 index ebfb3fc8..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. 
- -const ( - _CTL_QUERY = -2 - - _SYSCTL_VERS_1 = 0x1000000 -) - -var _zero uintptr - -func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(_p0), - uintptr(len(mib)), - uintptr(unsafe.Pointer(old)), - uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), - uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -type sysctlNode struct { - Flags uint32 - Num int32 - Name [32]int8 - Ver uint32 - __rsvd uint32 - Un [16]byte - _sysctl_size [8]byte - _sysctl_func [8]byte - _sysctl_parent [8]byte - _sysctl_desc [8]byte -} - -func sysctlNodes(mib []int32) ([]sysctlNode, error) { - var olen uintptr - - // Get a list of all sysctl nodes below the given MIB by performing - // a sysctl for the given MIB with CTL_QUERY appended. - mib = append(mib, _CTL_QUERY) - qnode := sysctlNode{Flags: _SYSCTL_VERS_1} - qp := (*byte)(unsafe.Pointer(&qnode)) - sz := unsafe.Sizeof(qnode) - if err := sysctl(mib, nil, &olen, qp, sz); err != nil { - return nil, err - } - - // Now that we know the size, get the actual nodes. - nodes := make([]sysctlNode, olen/sz) - np := (*byte)(unsafe.Pointer(&nodes[0])) - if err := sysctl(mib, np, &olen, qp, sz); err != nil { - return nil, err - } - - return nodes, nil -} - -func nametomib(name string) ([]int32, error) { - // Split name into components. - var parts []string - last := 0 - for i := 0; i < len(name); i++ { - if name[i] == '.' { - parts = append(parts, name[last:i]) - last = i + 1 - } - } - parts = append(parts, name[last:]) - - mib := []int32{} - // Discover the nodes and construct the MIB OID. 
- for partno, part := range parts { - nodes, err := sysctlNodes(mib) - if err != nil { - return nil, err - } - for _, node := range nodes { - n := make([]byte, 0) - for i := range node.Name { - if node.Name[i] != 0 { - n = append(n, byte(node.Name[i])) - } - } - if string(n) == part { - mib = append(mib, int32(node.Num)) - break - } - } - if len(mib) != partno+1 { - return nil, err - } - } - - return mib, nil -} - -// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's -type aarch64SysctlCPUID struct { - midr uint64 /* Main ID Register */ - revidr uint64 /* Revision ID Register */ - mpidr uint64 /* Multiprocessor Affinity Register */ - aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ - aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ - aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ - aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ - aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ - aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ - aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ - aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ - aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ - aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ - mvfr0 uint32 /* Media and VFP Feature Register 0 */ - mvfr1 uint32 /* Media and VFP Feature Register 1 */ - mvfr2 uint32 /* Media and VFP Feature Register 2 */ - pad uint32 - clidr uint64 /* Cache Level ID Register */ - ctr uint64 /* Cache Type Register */ -} - -func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { - mib, err := nametomib(name) - if err != nil { - return nil, err - } - - out := aarch64SysctlCPUID{} - n := unsafe.Sizeof(out) - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(unsafe.Pointer(&mib[0])), - uintptr(len(mib)), - uintptr(unsafe.Pointer(&out)), - uintptr(unsafe.Pointer(&n)), - uintptr(0), - uintptr(0)) - if errno != 0 { - return nil, errno - } - return &out, nil -} - -func doinit() { - cpuid, 
err := sysctlCPUID("machdep.cpu0.cpu_id") - if err != nil { - setMinimalFeatures() - return - } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go deleted file mode 100644 index 85b64d5c..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. - -const ( - // From OpenBSD's sys/sysctl.h. - _CTL_MACHDEP = 7 - - // From OpenBSD's machine/cpu.h. - _CPU_ID_AA64ISAR0 = 2 - _CPU_ID_AA64ISAR1 = 3 -) - -// Implemented in the runtime package (runtime/sys_openbsd3.go) -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) - -//go:linkname syscall_syscall6 syscall.syscall6 - -func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - -func sysctlUint64(mib []uint32) (uint64, bool) { - var out uint64 - nout := unsafe.Sizeof(out) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { - return 0, false - } - return out, true -} - -func doinit() { - setMinimalFeatures() - - // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. 
- isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) - if !ok { - return - } - isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) - if !ok { - return - } - parseARM64SystemRegisters(isar0, isar1, 0) - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s deleted file mode 100644 index 054ba05d..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go deleted file mode 100644 index d7b4fb4c..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && arm -// +build !linux,arm - -package cpu - -func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index f3cde129..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !linux && !netbsd && !openbsd && arm64 -// +build !linux,!netbsd,!openbsd,arm64 - -package cpu - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go deleted file mode 100644 index 0dafe964..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && (mips64 || mips64le) -// +build !linux -// +build mips64 mips64le - -package cpu - -func archInit() { - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go deleted file mode 100644 index 060d46b6..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !aix && !linux && (ppc64 || ppc64le) -// +build !aix -// +build !linux -// +build ppc64 ppc64le - -package cpu - -func archInit() { - PPC64.IsPOWER8 = true - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go deleted file mode 100644 index dd10eb79..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !linux && riscv64 -// +build !linux,riscv64 - -package cpu - -func archInit() { - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go deleted file mode 100644 index 4e8acd16..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ppc64 || ppc64le -// +build ppc64 ppc64le - -package cpu - -const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "darn", Feature: &PPC64.HasDARN}, - {Name: "scv", Feature: &PPC64.HasSCV}, - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go deleted file mode 100644 index ff7da60e..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build riscv64 -// +build riscv64 - -package cpu - -const cacheLineSize = 64 - -func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go deleted file mode 100644 index 5881b883..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -const cacheLineSize = 256 - -func initOptions() { - options = []option{ - {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, - {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, - {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, - {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, - {Name: "dfp", Feature: &S390X.HasDFP}, - {Name: "etf3eh", Feature: &S390X.HasETF3EH}, - {Name: "msa", Feature: &S390X.HasMSA}, - {Name: "aes", Feature: &S390X.HasAES}, - {Name: "aescbc", Feature: &S390X.HasAESCBC}, - {Name: "aesctr", Feature: &S390X.HasAESCTR}, - {Name: "aesgcm", Feature: &S390X.HasAESGCM}, - {Name: "ghash", Feature: &S390X.HasGHASH}, - {Name: "sha1", Feature: &S390X.HasSHA1}, - {Name: "sha256", Feature: &S390X.HasSHA256}, - {Name: "sha3", Feature: &S390X.HasSHA3}, - {Name: "sha512", Feature: &S390X.HasSHA512}, - {Name: "vx", Feature: &S390X.HasVX}, - {Name: "vxe", Feature: &S390X.HasVXE}, - } -} - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// facility is a bit index for the named facility. 
-type facility uint8 - -const ( - // mandatory facilities - zarch facility = 1 // z architecture mode is active - stflef facility = 7 // store-facility-list-extended - ldisp facility = 18 // long-displacement - eimm facility = 21 // extended-immediate - - // miscellaneous facilities - dfp facility = 42 // decimal-floating-point - etf3eh facility = 30 // extended-translation 3 enhancement - - // cryptography facilities - msa facility = 17 // message-security-assist - msa3 facility = 76 // message-security-assist extension 3 - msa4 facility = 77 // message-security-assist extension 4 - msa5 facility = 57 // message-security-assist extension 5 - msa8 facility = 146 // message-security-assist extension 8 - msa9 facility = 155 // message-security-assist extension 9 - - // vector facilities - vx facility = 129 // vector facility - vxe facility = 135 // vector-enhancements 1 - vxe2 facility = 148 // vector-enhancements 2 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -// function is the code for the named cryptographic function. 
-type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. -func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - initS390Xbase() - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) 
- } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index 96f81e20..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc -// +build gc - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT 
·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest (KLMD) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index 7747d888..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build wasm -// +build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. - -const cacheLineSize = 0 - -func initOptions() {} - -func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index 2dcde828..00000000 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build 386 || amd64 || amd64p32 -// +build 386 amd64 amd64p32 - -package cpu - -import "runtime" - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "adx", Feature: &X86.HasADX}, - {Name: "aes", Feature: &X86.HasAES}, - {Name: "avx", Feature: &X86.HasAVX}, - {Name: "avx2", Feature: &X86.HasAVX2}, - {Name: "avx512", Feature: &X86.HasAVX512}, - {Name: "avx512f", Feature: &X86.HasAVX512F}, - {Name: "avx512cd", Feature: &X86.HasAVX512CD}, - {Name: "avx512er", Feature: &X86.HasAVX512ER}, - {Name: "avx512pf", Feature: &X86.HasAVX512PF}, - {Name: "avx512vl", Feature: &X86.HasAVX512VL}, - {Name: "avx512bw", Feature: &X86.HasAVX512BW}, - {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, - {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, - {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, - {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, - {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, - {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, - {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, - {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, - {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, - {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, - {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, - {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, - {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, - {Name: "amxtile", Feature: &X86.HasAMXTile}, - {Name: "amxint8", Feature: &X86.HasAMXInt8}, - {Name: "amxbf16", Feature: &X86.HasAMXBF16}, - {Name: "bmi1", Feature: &X86.HasBMI1}, - {Name: "bmi2", Feature: &X86.HasBMI2}, - {Name: "cx16", Feature: &X86.HasCX16}, - {Name: "erms", Feature: &X86.HasERMS}, - {Name: "fma", Feature: &X86.HasFMA}, - {Name: "osxsave", Feature: &X86.HasOSXSAVE}, - {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, - {Name: "popcnt", Feature: &X86.HasPOPCNT}, - {Name: "rdrand", Feature: &X86.HasRDRAND}, - {Name: "rdseed", Feature: &X86.HasRDSEED}, - {Name: "sse3", Feature: 
&X86.HasSSE3}, - {Name: "sse41", Feature: &X86.HasSSE41}, - {Name: "sse42", Feature: &X86.HasSSE42}, - {Name: "ssse3", Feature: &X86.HasSSSE3}, - - // These capabilities should always be enabled on amd64: - {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, - } -} - -func archInit() { - - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasCX16 = isSet(13, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) - - var osSupportsAVX, osSupportsAVX512 bool - // For XGETBV, OSXSAVE bit is required and sufficient. - if X86.HasOSXSAVE { - eax, _ := xgetbv() - // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) - - if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false - } else { - // Check if OPMASK and ZMM registers have OS support. 
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) - } - } - - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX - - if maxID < 7 { - return - } - - _, ebx7, ecx7, edx7 := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) - - X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension - if X86.HasAVX512 { - X86.HasAVX512F = true - X86.HasAVX512CD = isSet(28, ebx7) - X86.HasAVX512ER = isSet(27, ebx7) - X86.HasAVX512PF = isSet(26, ebx7) - X86.HasAVX512VL = isSet(31, ebx7) - X86.HasAVX512BW = isSet(30, ebx7) - X86.HasAVX512DQ = isSet(17, ebx7) - X86.HasAVX512IFMA = isSet(21, ebx7) - X86.HasAVX512VBMI = isSet(1, ecx7) - X86.HasAVX5124VNNIW = isSet(2, edx7) - X86.HasAVX5124FMAPS = isSet(3, edx7) - X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) - X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) - X86.HasAVX512VNNI = isSet(11, ecx7) - X86.HasAVX512GFNI = isSet(8, ecx7) - X86.HasAVX512VAES = isSet(9, ecx7) - X86.HasAVX512VBMI2 = isSet(6, ecx7) - X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) - } - - X86.HasAMXTile = isSet(24, edx7) - X86.HasAMXInt8 = isSet(25, edx7) - X86.HasAMXBF16 = isSet(22, edx7) -} - -func isSet(bitpos uint, value uint32) bool { - return value&(1<> 63)) -) - -// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 -// These are initialized in cpu_$GOARCH.go -// and should not be changed after they are initialized. -var hwCap uint -var hwCap2 uint - -func readHWCAP() error { - // For Go 1.21+, get auxv from the Go runtime. 
- if a := getAuxv(); len(a) > 0 { - for len(a) >= 2 { - tag, val := a[0], uint(a[1]) - a = a[2:] - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - return nil - } - - buf, err := os.ReadFile(procAuxv) - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false. On some - // architectures (e.g. arm64) doinit() implements a fallback - // readout and will set Initialized = true again. - return err - } - bo := hostByteOrder() - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - return nil -} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go deleted file mode 100644 index 762b63d6..00000000 --- a/vendor/golang.org/x/sys/cpu/parse.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import "strconv" - -// parseRelease parses a dot-separated version number. It follows the semver -// syntax, but allows the minor and patch versions to be elided. -// -// This is a copy of the Go runtime's parseRelease from -// https://golang.org/cl/209597. -func parseRelease(rel string) (major, minor, patch int, ok bool) { - // Strip anything after a dash or plus. - for i := 0; i < len(rel); i++ { - if rel[i] == '-' || rel[i] == '+' { - rel = rel[:i] - break - } - } - - next := func() (int, bool) { - for i := 0; i < len(rel); i++ { - if rel[i] == '.' 
{ - ver, err := strconv.Atoi(rel[:i]) - rel = rel[i+1:] - return ver, err == nil - } - } - ver, err := strconv.Atoi(rel) - rel = "" - return ver, err == nil - } - if major, ok = next(); !ok || rel == "" { - return - } - if minor, ok = next(); !ok || rel == "" { - return - } - patch, ok = next() - return -} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go deleted file mode 100644 index d87bd6b3..00000000 --- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && arm64 -// +build linux,arm64 - -package cpu - -import ( - "errors" - "io" - "os" - "strings" -) - -func readLinuxProcCPUInfo() error { - f, err := os.Open("/proc/cpuinfo") - if err != nil { - return err - } - defer f.Close() - - var buf [1 << 10]byte // enough for first CPU - n, err := io.ReadFull(f, buf[:]) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - in := string(buf[:n]) - const features = "\nFeatures : " - i := strings.Index(in, features) - if i == -1 { - return errors.New("no CPU features found") - } - in = in[i+len(features):] - if i := strings.Index(in, "\n"); i != -1 { - in = in[:i] - } - m := map[string]*bool{} - - initOptions() // need it early here; it's harmless to call twice - for _, o := range options { - m[o.Name] = o.Feature - } - // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". 
- m["evtstrm"] = &ARM64.HasEVTSTRM - - for _, f := range strings.Fields(in) { - if p, ok := m[f]; ok { - *p = true - } - } - return nil -} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go deleted file mode 100644 index 5f92ac9a..00000000 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) -// on platforms that use auxv. -var getAuxvFn func() []uintptr - -func getAuxv() []uintptr { - if getAuxvFn == nil { - return nil - } - return getAuxvFn() -} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go deleted file mode 100644 index b975ea2a..00000000 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 -// +build go1.21 - -package cpu - -import ( - _ "unsafe" // for linkname -) - -//go:linkname runtime_getAuxv runtime.getAuxv -func runtime_getAuxv() []uintptr - -func init() { - getAuxvFn = runtime_getAuxv -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go deleted file mode 100644 index 96134157..00000000 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Recreate a getsystemcfg syscall handler instead of -// using the one provided by x/sys/unix to avoid having -// the dependency between them. 
(See golang.org/issue/32102) -// Moreover, this file will be used during the building of -// gccgo's libgo and thus must not used a CGo method. - -//go:build aix && gccgo -// +build aix,gccgo - -package cpu - -import ( - "syscall" -) - -//extern getsystemcfg -func gccgoGetsystemcfg(label uint32) (r uint64) - -func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { - r1 = uintptr(gccgoGetsystemcfg(uint32(label))) - e1 = syscall.GetErrno() - return -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go deleted file mode 100644 index 904be42f..00000000 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Minimal copy of x/sys/unix so the cpu package can make a -// system call on AIX without depending on x/sys/unix. -// (See golang.org/issue/32102) - -//go:build aix && ppc64 && gc -// +build aix,ppc64,gc - -package cpu - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" - -//go:linkname libc_getsystemcfg libc_getsystemcfg - -type syscallFunc uintptr - -var libc_getsystemcfg syscallFunc - -type errno = syscall.Errno - -// Implemented in runtime/syscall_aix.go. 
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) - -func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) - return -} diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248..ab0fbb79 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d..52d530d7 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f44..d79560a2 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. 
grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ff7fea10..429c389e 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -337,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() return nil } @@ -404,13 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { // name resolver, load balancer and any subchannels. 
func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { - cc.mu.Unlock() return ErrClientConnClosing } if cc.idlenessState != ccIdlenessStateActive { - channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() + channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) return nil } @@ -431,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { cc.balancerWrapper.enterIdleMode() cc.csMgr.updateState(connectivity.Idle) cc.idlenessState = ccIdlenessStateIdle - cc.mu.Unlock() + cc.addTraceEvent("entering idle mode") go func() { - cc.addTraceEvent("entering idle mode") for ac := range conns { ac.tearDown(errConnIdling) } }() + return nil } @@ -804,6 +804,12 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } + internal.EnterIdleModeForTesting = func(cc *ClientConn) error { + return cc.enterIdleMode() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.exitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 1fd0d5c1..cfc9fd85 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -644,6 +644,7 @@ func defaultDialOptions() dialOptions { UseProxy: true, }, recvBufferPool: nopBufferPool{}, + idleTimeout: 30 * time.Minute, } } @@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. 
+// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 69d5580b..5ebf88d7 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. 
} var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3d..fed1c011 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c8a8c76d..0d94c63e 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -175,6 +175,12 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. 
GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) error + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 4cf85cad..03ef2fed 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,6 +43,34 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. 
func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3f..17f7a21b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). 
+// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index badab8ac..d6f5c493 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1548,14 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream 
was still active, // send RST_STREAM. rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index c06db679..6fa1eb41 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) for _, sh := range t.stats { s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) 
inHeader := &stats.InHeader{ @@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (t *http2Server) HandleStreams(handle func(*Stream)) { defer close(t.readerDone) for { t.controlBuf.throttle() @@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(frame, handle); err != nil { t.Close(err) break } @@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19581400..dc29d590 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -34,12 +34,9 @@ import ( "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -88,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 74a811fc..aac056e7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -698,7 +698,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. 
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eeae92fb..8f60d421 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -983,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { f := func() { defer streamQuota.release() defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -995,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) wg.Wait() } @@ -1049,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.serveStreams(st) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1133,7 +1103,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1152,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1194,7 +1164,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err 
error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1208,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1240,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1262,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1348,7 +1317,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1362,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1370,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1395,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1403,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1418,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1445,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1460,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1479,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1521,7 +1490,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1535,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1574,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1616,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1694,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1712,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return 
t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.RemoteAddr(), + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } @@ -1748,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server 
unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1767,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa4..07f01257 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 724ad210..6d2cadd7 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.58.3" +const Version = "1.59.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bbc9e2e3..bb480f1f 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -93,6 +93,9 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' diff --git a/vendor/modules.txt b/vendor/modules.txt index 937421d3..87508edf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,14 @@ +# github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3 +## explicit; go 1.21 +github.com/absmach/magistrala +github.com/absmach/magistrala/internal/apiutil +github.com/absmach/magistrala/logger +github.com/absmach/magistrala/pkg/errors +github.com/absmach/magistrala/pkg/messaging +github.com/absmach/magistrala/pkg/sdk/go +github.com/absmach/magistrala/pkg/transformers +github.com/absmach/magistrala/pkg/transformers/senml +github.com/absmach/magistrala/pkg/uuid # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -7,19 +18,6 @@ github.com/caarlos0/env/v9 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/creack/pty v1.1.18 -## explicit; go 1.13 -github.com/creack/pty -# github.com/eclipse/paho.mqtt.golang v1.4.3 -## explicit; go 1.18 -github.com/eclipse/paho.mqtt.golang -github.com/eclipse/paho.mqtt.golang/packets -# github.com/edgexfoundry/go-mod-core-contracts v0.1.70 -## explicit; 
go 1.13 -github.com/edgexfoundry/go-mod-core-contracts/clients -github.com/edgexfoundry/go-mod-core-contracts/clients/interfaces -github.com/edgexfoundry/go-mod-core-contracts/clients/types -github.com/edgexfoundry/go-mod-core-contracts/models # github.com/fxamacker/cbor/v2 v2.5.0 ## explicit; go 1.12 github.com/fxamacker/cbor/v2 @@ -57,59 +55,12 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/uuid v1.3.1 -## explicit -github.com/google/uuid -# github.com/gorilla/websocket v1.5.0 -## explicit; go 1.12 -github.com/gorilla/websocket -# github.com/klauspost/compress v1.17.0 -## explicit; go 1.18 -github.com/klauspost/compress/flate -# github.com/mainflux/agent v0.11.1-0.20230724130550-0cd3f4c8c27c -## explicit; go 1.20 -github.com/mainflux/agent/pkg/agent -github.com/mainflux/agent/pkg/bootstrap -github.com/mainflux/agent/pkg/edgex -github.com/mainflux/agent/pkg/encoder -github.com/mainflux/agent/pkg/terminal -# github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb -## explicit; go 1.20 -github.com/mainflux/export/pkg/config -# github.com/mainflux/mainflux v0.0.0-20231021215047-ab832aff1b52 -## explicit; go 1.21 -github.com/mainflux/mainflux -github.com/mainflux/mainflux/bootstrap -github.com/mainflux/mainflux/internal/apiutil -github.com/mainflux/mainflux/logger -github.com/mainflux/mainflux/pkg/clients -github.com/mainflux/mainflux/pkg/errors -github.com/mainflux/mainflux/pkg/messaging -github.com/mainflux/mainflux/pkg/sdk/go -github.com/mainflux/mainflux/pkg/transformers -github.com/mainflux/mainflux/pkg/transformers/senml -github.com/mainflux/mainflux/pkg/uuid # github.com/mainflux/senml v1.5.0 ## explicit; go 1.13 github.com/mainflux/senml -# github.com/matttproud/golang_protobuf_extensions v1.0.4 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/nats-io/nats.go v1.30.2 -## 
explicit; go 1.20 -github.com/nats-io/nats.go -github.com/nats-io/nats.go/encoders/builtin -github.com/nats-io/nats.go/internal/parser -github.com/nats-io/nats.go/util -# github.com/nats-io/nkeys v0.4.5 +# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 ## explicit; go 1.19 -github.com/nats-io/nkeys -# github.com/nats-io/nuid v1.0.1 -## explicit -github.com/nats-io/nuid -# github.com/pelletier/go-toml v1.9.5 -## explicit; go 1.12 -github.com/pelletier/go-toml +github.com/matttproud/golang_protobuf_extensions/v2/pbutil # github.com/prometheus/client_golang v1.17.0 ## explicit; go 1.19 github.com/prometheus/client_golang/prometheus @@ -118,8 +69,8 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.5.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.44.0 -## explicit; go 1.18 +# github.com/prometheus/common v0.45.0 +## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model @@ -131,17 +82,6 @@ github.com/prometheus/procfs/internal/util # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 -# golang.org/x/crypto v0.14.0 -## explicit; go 1.17 -golang.org/x/crypto/blake2b -golang.org/x/crypto/curve25519 -golang.org/x/crypto/curve25519/internal/field -golang.org/x/crypto/ed25519 -golang.org/x/crypto/internal/alias -golang.org/x/crypto/internal/poly1305 -golang.org/x/crypto/nacl/box -golang.org/x/crypto/nacl/secretbox -golang.org/x/crypto/salsa20/salsa # golang.org/x/exp v0.0.0-20231006140011-7918f672742d ## explicit; go 1.20 golang.org/x/exp/constraints @@ -152,17 +92,13 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries -golang.org/x/net/proxy golang.org/x/net/trace # golang.org/x/sync v0.4.0 ## explicit; go 1.17 
golang.org/x/sync/errgroup -golang.org/x/sync/semaphore # golang.org/x/sys v0.13.0 ## explicit; go 1.17 -golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.13.0 @@ -171,10 +107,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.58.3 +# google.golang.org/grpc v1.59.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes From 907ccb0c6212e523ec8caa6c48528fd3956fdea4 Mon Sep 17 00:00:00 2001 From: ianmuchyri Date: Tue, 7 Nov 2023 16:01:12 +0300 Subject: [PATCH 2/6] update agent import Signed-off-by: ianmuchyri --- go.mod | 11 +- go.sum | 939 +++++++++++++++++++++++++++++ ui/service.go | 171 +++--- ui/web/static/css/styles.css | 18 +- ui/web/template/header.html | 2 +- ui/web/template/login.html | 7 +- ui/web/template/navbar.html | 10 +- ui/web/template/resetpassword.html | 3 +- vendor/modules.txt | 40 ++ 9 files changed, 1098 insertions(+), 103 deletions(-) diff --git a/go.mod b/go.mod index 2763dfd6..0dbddc48 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,15 @@ module github.com/absmach/magistrala-ui go 1.21.0 require ( + github.com/absmach/agent v0.0.0-20231107115142-c8b509f24d50 github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3 github.com/caarlos0/env/v9 v9.0.0 + github.com/eclipse/paho.mqtt.golang v1.4.3 github.com/go-chi/chi/v5 v5.0.10 github.com/go-kit/kit v0.13.0 github.com/go-zoo/bone v1.3.0 github.com/golang-jwt/jwt v3.2.2+incompatible + github.com/mainflux/senml v1.5.0 github.com/prometheus/client_golang v1.17.0 golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/sync v0.4.0 @@ -18,13 +21,19 @@ require ( require ( 
github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/creack/pty v1.1.18 // indirect + github.com/edgexfoundry/go-mod-core-contracts v0.1.70 // indirect github.com/fxamacker/cbor/v2 v2.5.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/mainflux/senml v1.5.0 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb // indirect + github.com/mainflux/mainflux v0.12.0 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect diff --git a/go.sum b/go.sum index e9cded9d..96589ac5 100644 --- a/go.sum +++ b/go.sum @@ -1,75 +1,397 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.4.7/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/absmach/agent v0.0.0-20231107115142-c8b509f24d50 h1:RyDGAkOtIuN34HEJ/UDTc0r7qZFO5jBx5cB3wmUtPJE= +github.com/absmach/agent v0.0.0-20231107115142-c8b509f24d50/go.mod h1:/S3WufAqHTSU6MEq6cN0g6m21RhPX8dxqubYh08vbU8= github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3 h1:g5dSaPtjj9mNnz2cMJ076MRKSnrOcMjW8BsJ7Kbzd7s= github.com/absmach/magistrala v0.11.1-0.20231102134813-44408395e6a3/go.mod h1:ebPpg3UNO6ier1Ic2jBHkd8VUDD62707JRacj4UwGkM= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5/go.mod h1:976q2ETgjT2snVCf2ZaBnyBbVoPERGjUz+0sofzEfro= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift 
v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod 
h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/caarlos0/env/v7 v7.1.0 h1:9lzTF5amyQeWHZzuZeKlCb5FWSUxpG1js43mhbY8ozg= github.com/caarlos0/env/v7 v7.1.0/go.mod h1:LPPWniDUq4JaO6Q41vtlyikhMknqymCLBw0eX4dcH1E= github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc= github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/continuity v0.0.0-20180416230128-c6cef3483023/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
+github.com/dsnet/golib/memfile v0.0.0-20190531212259-571cdbcff553/go.mod h1:tXGNW9q3RwvWt1VV2qrRKlSSz0npnh12yftCSCy2T64= +github.com/dsnet/golib/memfile v0.0.0-20200723050859-c110804dfa93/go.mod h1:tXGNW9q3RwvWt1VV2qrRKlSSz0npnh12yftCSCy2T64= +github.com/dustin/go-coap v0.0.0-20170214053734-ddcc80675fa4/go.mod h1:as2rZ2aojRzZF8bGx1bPAn1yi9ICG6LwkiPOj6PBtjc= +github.com/dustin/go-coap v0.0.0-20190908170653-752e0f79981e/go.mod h1:as2rZ2aojRzZF8bGx1bPAn1yi9ICG6LwkiPOj6PBtjc= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= +github.com/edgexfoundry/go-mod-core-contracts v0.1.70 h1:MYqxk52m5J37IlNRI0NxJGEtIx+1qdPp1pdYcMYIuug= +github.com/edgexfoundry/go-mod-core-contracts v0.1.70/go.mod h1:Bt+lYZeV02ndr/Jr6wnA3em2J+VTzZ1c0KVtqkNKdpg= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fxamacker/cbor v1.3.2/go.mod h1:Uy2lR31/2WfmW0yiA4i3t+we5kF3B/wzKsttcux+i/g= github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-acme/lego v2.7.2+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ocf/go-coap/v2 v2.0.4-0.20200728125043-f38b86f047a7/go.mod h1:X9wVKcaOSx7wBxKcvrWgMQq1R2DNeA7NBLW2osIb8TM= +github.com/go-ocf/kit v0.0.0-20200728130040-4aebdb6982bc/go.mod h1:TIsoMT/iB7t9P6ahkcOnsmvS83SIJsv9qXRfz/yLf6M= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.3.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-redis/redis v6.15.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zoo/bone v1.3.0 h1:PY6sHq37FnQhj+4ZyqFIzJQHvrrGx0GEc3vTZZC/OsI= github.com/go-zoo/bone v1.3.0/go.mod h1:HI3Lhb7G3UQcAwEhOJ2WyNcsFtQX1WYHa0Hl4OBbhW8= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy 
v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packr v1.30.1/go.mod 
h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= +github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gocql/gocql v0.0.0-20181106112037-68ae1e384be4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/gocql/gocql v0.0.0-20200624222514-34081eda590e/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= 
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopcua/opcua v0.1.6/go.mod h1:INwnDoRxmNWAt7+tzqxuGqQkSF2c1C69VAL0c2q6AcY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp 
v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod 
h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod 
h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hokaccha/go-prettyjson v0.0.0-20180920040306-f579f869bbfe/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= +github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/influxdb v1.6.4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.8.1/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod 
h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -80,53 +402,360 @@ github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY= github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.2.0/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZgZdoFrZaZNM= +github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.2.1-0.20190319043955-cdf62fdf55f6/go.mod 
h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/karrick/godirwalk v1.10.12/go.mod 
h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lestrrat-go/iter v0.0.0-20200422075355-fc1769541911/go.mod 
h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/jwx v1.0.2/go.mod h1:TPF17WiSFegZo+c20fdpw49QD+/7n4/IsGvEmCSWwT0= +github.com/lestrrat-go/pdebug v0.0.0-20200204225717-4d6bd78da58d/go.mod h1:B06CSso/AWxiPejj+fheUINGeBKeeEZNt8w+EoU7+L8= +github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.6.0/go.mod h1:4vXEAYvW1fRQ2/FhZ78H73A60MHw1geSm145z2mdY1g= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb h1:+Zz1+/lX3bmt2AWyJHG2yVsJ7vqa0Q2XIYagG1dHj5c= +github.com/mainflux/export v0.1.1-0.20230724124847-67d0bc7f38cb/go.mod h1:speJE1lnq2emg5o4DqEoOe6nOgFRd0AEYSBBV2bCGKg= +github.com/mainflux/mainflux v0.0.0-20191223163044-f42f2095bab4/go.mod h1:K3ghSIpAqwv5F/t30LO57+11S7tE97ur2Z6wWEHa2CA= +github.com/mainflux/mainflux v0.0.0-20200314190902-c91fe0d45353/go.mod h1:yijZGLNkcDOPJfPhRMwuu5ZFcNHqDHzWurN4q1rOT/Q= +github.com/mainflux/mainflux v0.0.0-20200324100741-6ffa916ed229/go.mod h1:mde8cQhTPjLulu2pn/x8OgQ2S++lDufS+ODE93zuHjY= +github.com/mainflux/mainflux v0.0.0-20200512161904-df6f5adff8e4/go.mod h1:2caJ68GaQPVNe85z5pNJMJk0CflgcS3XWghYsJSBesU= +github.com/mainflux/mainflux 
v0.11.1-0.20200603183352-7f3e2c1b21ed/go.mod h1:8jwcwH3MKYgoQks9BBHq19Br25ElzW25vteZX7tWZ+w= +github.com/mainflux/mainflux v0.12.0 h1:UcZpOlGgkXi27gMFVmppAw3Mi6gTjfA0wyUELTTcA3E= +github.com/mainflux/mainflux v0.12.0/go.mod h1:d5L91byP5g4Y8dMzd+4LDjXuMy2oM80qg5nt5jMXN5I= +github.com/mainflux/mproxy v0.1.3/go.mod h1:/BdaBfgye1GNCD+eat4ipFamy9IEVRH5nhZS0yEShVg= +github.com/mainflux/mproxy v0.1.5/go.mod h1:MBLtv/RvhT8QsmXz4g3GxkRaP8PqlVqBWeqvw9QmO8k= +github.com/mainflux/mproxy v0.1.8/go.mod h1:NnhrUDytvV4pCI5LDuet86/WrymrUaX0/x1tlUHTKhU= +github.com/mainflux/mproxy v0.2.1-0.20200603122422-b08e1fa2cf5c/go.mod h1:lFD56bDgNTslCLoTlZfo2DyQbkQOnoxEXmbE4VumRm4= +github.com/mainflux/mproxy v0.2.2/go.mod h1:+T8h6ZupYPl6Lx9A0hqpcUQtcLyOBdzm/lfkjvPfGXo= github.com/mainflux/mproxy v0.3.1-0.20231022160500-0e0db9e1642c h1:iF14azUs+lEzWPgVe6+lPqSjKGuN0d66cCFd2Cxt9Mo= github.com/mainflux/mproxy v0.3.1-0.20231022160500-0e0db9e1642c/go.mod h1:NruAIEwk3udRzb8ZOrbA77Zo0eix3W8pTGOA8E6hvpg= +github.com/mainflux/senml v1.0.0/go.mod h1:g9i8pj4WMs29KkUpXivbe/PP0qJd1kt3b1CF77S8A3s= +github.com/mainflux/senml v1.0.1/go.mod h1:SMX76mM5yenjLVjZOM27+njCGkP+AA64O46nRQiBRlE= github.com/mainflux/senml v1.5.0 h1:GAd1y1eMohfa6sVYcr2iQfVfkkh9l/q7B1TWF5L68xs= github.com/mainflux/senml v1.5.0/go.mod h1:SMX76mM5yenjLVjZOM27+njCGkP+AA64O46nRQiBRlE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface 
v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/go-nats v1.6.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod 
h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E= github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY= github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts= +github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go 
v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.0+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.6.0/go.mod h1:4ZOpj8qBUmh8fcBSVzkH2bws2s91JdGvHUqan4GHEuQ= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod 
h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pion/dtls/v2 v2.0.1-0.20200503085337-8e86b3a7d585/go.mod h1:/GahSOC8ZY/+17zkaGJIG4OUkSGAcZu/N/g3roBOCkM= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/transport v0.10.0/go.mod h1:BnHnUipd0rZQyTVB2SBGojFHT9CBt5C5TcsJSQGkvSE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/plgd-dev/go-coap/v2 v2.0.4-0.20200819112225-8eb712b901bc/go.mod h1:+tCi9Q78H/orWRtpVWyBgrr4vKFo2zYtbbxUllerBp4= +github.com/plgd-dev/go-coap/v2 v2.0.4/go.mod h1:DccQmYY6swDlNlOCQOAX+SXTI9laSfGytskmeeNWmms= +github.com/plgd-dev/kit v0.0.0-20200819113605-d5fcf3e94f63/go.mod h1:Yl9zisyXfPdtP9hTWlJqjJYXmgU/jtSDKttz9/CeD90= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= 
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common 
v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo= github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rubenv/sql-migrate v0.0.0-20181106121204-ba2c6a7295c5/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= +github.com/rubenv/sql-migrate v0.0.0-20200429072036-ae26b214fa43/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= +github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.0.0-20180905101324-b2a34562d02c/go.mod h1:XvpJiTD8NibaH7z0NzyfhR1+NQDtR9F/x92xheTwC9k= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= 
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.5.0/go.mod h1:AkYRkVJF8TkSG/xet6PzXX+l39KhhXa2pdqVSxnTcn4= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0/go.mod 
h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.12.0/go.mod h1:229t1eWu9UXTPmoUkbpN/fctKPBY4IJoFXQnxHGXy6E= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.1-0.20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.1.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.3/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.5/go.mod h1:Ual6Gkco7ZGQw8wE1t4tLnvBsf6yVSM60qW6TgOeJ5c= +go.opencensus.io v0.20.1/go.mod 
h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= @@ -135,30 +764,340 @@ go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPi go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20190808205415-ced62fe5104b/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod 
h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200604104852-0b0486081ffb/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= 
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= 
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gorp.v1 v1.7.1/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/ory-am/dockertest.v3 v3.3.2/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/ory/dockertest.v3 v3.3.5/go.mod h1:wI78nwA6jQZVXv3va0CcbJAuftRnAa063zO5Fek7+uI= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 
v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/ui/service.go b/ui/service.go index 81b63d86..e0123521 100644 --- a/ui/service.go +++ b/ui/service.go @@ -9,16 +9,21 @@ import ( "bytes" "context" "encoding/json" + "fmt" 
"html/template" + "log" "math" "strings" + "sync" "time" - "golang.org/x/exp/slices" - + "github.com/absmach/agent/pkg/bootstrap" "github.com/absmach/magistrala/pkg/errors" "github.com/absmach/magistrala/pkg/messaging" "github.com/absmach/magistrala/pkg/transformers/senml" + mqtt "github.com/eclipse/paho.mqtt.golang" + mfsenml "github.com/mainflux/senml" + "golang.org/x/exp/slices" sdk "github.com/absmach/magistrala/pkg/sdk/go" ) @@ -1869,87 +1874,87 @@ func (us *uiService) GetRemoteTerminal(id, token string) ([]byte, error) { return btpl.Bytes(), nil } -// func (us *uiService) ProcessTerminalCommand(ctx context.Context, id, tkn, command string, res chan string) error { -// cfg, err := us.sdk.ViewBootstrap(id, tkn) -// if err != nil { -// return errors.Wrap(err, ErrFailedRetreive) -// } - -// var content bootstrap.ServicesConfig - -// if err := json.Unmarshal([]byte(cfg.Content), &content); err != nil { -// return err -// } - -// channels, ok := cfg.Channels.([]sdk.Channel) -// if !ok { -// return errors.New("invalid channels") -// } - -// pubTopic := fmt.Sprintf("channels/%s/messages/req", channels[0].ID) -// subTopic := fmt.Sprintf("channels/%s/messages/res/#", channels[0].ID) - -// opts := mqtt.NewClientOptions().SetCleanSession(true).SetAutoReconnect(true) - -// opts.AddBroker(content.Agent.MQTT.URL) -// if content.Agent.MQTT.Username == "" || content.Agent.MQTT.Password == "" { -// opts.SetUsername(cfg.ThingID) -// opts.SetPassword(cfg.ThingKey) -// } else { -// opts.SetUsername(content.Agent.MQTT.Username) -// opts.SetPassword(content.Agent.MQTT.Password) -// } - -// opts.SetClientID(fmt.Sprintf("ui-terminal-%s", cfg.ThingID)) -// client := mqtt.NewClient(opts) - -// if token := client.Connect(); token.Wait() && token.Error() != nil { -// return token.Error() -// } - -// req := []mfsenml.Record{ -// {BaseName: "1", Name: "exec", StringValue: &command}, -// } -// reqByte, err1 := json.Marshal(req) -// if err1 != nil { -// return err1 -// } - -// token := 
client.Publish(pubTopic, 0, false, string(reqByte)) -// token.Wait() - -// if token.Error() != nil { -// return token.Error() -// } - -// var wg sync.WaitGroup -// wg.Add(1) -// errChan := make(chan error) - -// client.Subscribe(subTopic, 0, func(_ mqtt.Client, m mqtt.Message) { -// var data []mfsenml.Record -// if err := json.Unmarshal(m.Payload(), &data); err != nil { -// errChan <- err -// } -// res <- *data[0].StringValue -// wg.Done() -// }) - -// select { -// case <-ctx.Done(): -// log.Println("ProcessTerminalCommand canceled") -// case <-time.After(time.Second * 5): -// log.Println("Timeout occurred") -// res <- "timeout" -// case err := <-errChan: -// return err -// case <-res: -// wg.Wait() -// } - -// client.Disconnect(250) -// return nil -// } +func (us *uiService) ProcessTerminalCommand(ctx context.Context, id, tkn, command string, res chan string) error { + cfg, err := us.sdk.ViewBootstrap(id, tkn) + if err != nil { + return errors.Wrap(err, ErrFailedRetreive) + } + + var content bootstrap.ServicesConfig + + if err := json.Unmarshal([]byte(cfg.Content), &content); err != nil { + return err + } + + channels, ok := cfg.Channels.([]sdk.Channel) + if !ok { + return errors.New("invalid channels") + } + + pubTopic := fmt.Sprintf("channels/%s/messages/req", channels[0].ID) + subTopic := fmt.Sprintf("channels/%s/messages/res/#", channels[0].ID) + + opts := mqtt.NewClientOptions().SetCleanSession(true).SetAutoReconnect(true) + + opts.AddBroker(content.Agent.MQTT.URL) + if content.Agent.MQTT.Username == "" || content.Agent.MQTT.Password == "" { + opts.SetUsername(cfg.ThingID) + opts.SetPassword(cfg.ThingKey) + } else { + opts.SetUsername(content.Agent.MQTT.Username) + opts.SetPassword(content.Agent.MQTT.Password) + } + + opts.SetClientID(fmt.Sprintf("ui-terminal-%s", cfg.ThingID)) + client := mqtt.NewClient(opts) + + if token := client.Connect(); token.Wait() && token.Error() != nil { + return token.Error() + } + + req := []mfsenml.Record{ + {BaseName: "1", 
Name: "exec", StringValue: &command}, + } + reqByte, err1 := json.Marshal(req) + if err1 != nil { + return err1 + } + + token := client.Publish(pubTopic, 0, false, string(reqByte)) + token.Wait() + + if token.Error() != nil { + return token.Error() + } + + var wg sync.WaitGroup + wg.Add(1) + errChan := make(chan error) + + client.Subscribe(subTopic, 0, func(_ mqtt.Client, m mqtt.Message) { + var data []mfsenml.Record + if err := json.Unmarshal(m.Payload(), &data); err != nil { + errChan <- err + } + res <- *data[0].StringValue + wg.Done() + }) + + select { + case <-ctx.Done(): + log.Println("ProcessTerminalCommand canceled") + case <-time.After(time.Second * 5): + log.Println("Timeout occurred") + res <- "timeout" + case err := <-errChan: + return err + case <-res: + wg.Wait() + } + + client.Disconnect(250) + return nil +} func (us *uiService) GetEntities(token, item, name string, page, limit uint64) ([]byte, error) { offset := (page - 1) * limit diff --git a/ui/web/static/css/styles.css b/ui/web/static/css/styles.css index 6208f138..4986a884 100644 --- a/ui/web/static/css/styles.css +++ b/ui/web/static/css/styles.css @@ -42,7 +42,11 @@ display: inline-block; } -.sidebar-brand h2 { +.sidebar-brand { + margin-bottom: 1rem; +} + +.sidebar-brand h1 { font-family: "Montserrat"; } @@ -124,6 +128,7 @@ body.sidebar-toggled { .sidebar.toggled .sidebar-brand { margin-left: 0 !important; + padding-bottom: 3.5rem; } .sidebar.toggled .sidebar-brand h1 { @@ -205,7 +210,7 @@ body.sidebar-toggled .main-content { border-radius: var(--border-radius) !important; } -.doc-button:hover{ +.doc-button:hover { border: 1px solid var(--main-color) !important; border-radius: var(--border-radius) !important; color: #fff !important; @@ -278,6 +283,10 @@ body.sidebar-toggled .main-content { border-radius: 1rem; } +.login-card h1 { + font-family: "Montserrat"; +} + .input-field { background-color: #eaeaea; border-radius: 0.5rem; @@ -446,7 +455,7 @@ button.edit-btn { background: #b2dcef; } 
-.border-red{ +.border-red { border: 1px solid #eb2f2f; } @@ -523,6 +532,7 @@ button.edit-btn { a.sidebar-brand { height: 4.375rem; margin-left: 0 !important; + margin-bottom: 0 !important; } .sidebar-brand h1 { @@ -549,4 +559,4 @@ button.edit-btn { .table-container .desc-col { display: none; } -} +} \ No newline at end of file diff --git a/ui/web/template/header.html b/ui/web/template/header.html index 91347d9c..8a707e65 100644 --- a/ui/web/template/header.html +++ b/ui/web/template/header.html @@ -5,7 +5,7 @@ - Mainflux + Magistrala
-